Diffstat (limited to 'nova/tests/unit')
-rw-r--r--  nova/tests/unit/CA/cacert.pem  17
-rw-r--r--  nova/tests/unit/CA/private/cakey.pem  15
-rw-r--r--  nova/tests/unit/README.rst  95
-rw-r--r--  nova/tests/unit/__init__.py  49
-rw-r--r--  nova/tests/unit/api/__init__.py  0
-rw-r--r--  nova/tests/unit/api/ec2/__init__.py  0
-rw-r--r--  nova/tests/unit/api/ec2/public_key/dummy.fingerprint  1
-rw-r--r--  nova/tests/unit/api/ec2/public_key/dummy.pub  1
-rw-r--r--  nova/tests/unit/api/ec2/test_api.py  635
-rw-r--r--  nova/tests/unit/api/ec2/test_apirequest.py  92
-rw-r--r--  nova/tests/unit/api/ec2/test_cinder_cloud.py  1096
-rw-r--r--  nova/tests/unit/api/ec2/test_cloud.py  3255
-rw-r--r--  nova/tests/unit/api/ec2/test_ec2_validate.py  277
-rw-r--r--  nova/tests/unit/api/ec2/test_ec2utils.py  61
-rw-r--r--  nova/tests/unit/api/ec2/test_error_response.py  132
-rw-r--r--  nova/tests/unit/api/ec2/test_faults.py  46
-rw-r--r--  nova/tests/unit/api/ec2/test_middleware.py  225
-rw-r--r--  nova/tests/unit/api/openstack/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/common.py  55
-rw-r--r--  nova/tests/unit/api/openstack/compute/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py  734
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py  111
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_agents.py  352
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py  670
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py  455
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py  512
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py  159
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py  359
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py  421
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_cells.py  698
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_certificates.py  140
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py  210
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py  99
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py  260
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py  103
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_console_output.py  171
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_consoles.py  587
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py  387
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py  147
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py  449
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py  268
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py  184
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py  114
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py  101
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py  189
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py  196
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py  62
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py  148
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py  148
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py  123
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py  124
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py  256
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py  402
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py  127
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py  465
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py  127
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py  126
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py  127
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py  403
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py  412
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py  83
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py  853
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py  139
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_fping.py  106
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py  172
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_hosts.py  471
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py  92
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py  596
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_image_size.py  138
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py  327
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py  210
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py  497
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py  231
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_migrations.py  139
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_multinic.py  204
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_networks.py  610
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py  918
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py  222
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_quotas.py  648
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_rescue.py  270
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py  220
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py  515
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py  1767
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py  132
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py  158
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py  188
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py  521
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_password.py  94
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py  183
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py  159
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_services.py  576
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_shelve.py  148
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py  539
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py  209
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py  76
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py  306
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py  127
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_volumes.py  1083
-rw-r--r--  nova/tests/unit/api/openstack/compute/extensions/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py  92
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py  263
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py  383
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py  95
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py  270
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py  261
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py  387
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py  98
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py  57
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py  115
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py  547
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py  60
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py  236
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py  1131
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py  140
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py  80
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py  3353
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py  453
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py  48
-rw-r--r--  nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py  195
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/__init__.py  0
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/test_schemas.py  106
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml  8
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml  6
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml  2
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml  4
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml  9
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml  6
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml  7
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml  2
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml  14
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml  18
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml  6
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml  7
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml  27
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml  2
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml  16
-rw-r--r--  nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml  5
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_api.py  186
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_auth.py  61
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_consoles.py  293
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_extensions.py  747
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_flavors.py  943
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_image_metadata.py  366
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_images.py  1046
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_limits.py  1016
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_actions.py  1556
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_server_metadata.py  771
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_servers.py  4625
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_urlmap.py  171
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_v21_extensions.py  196
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_v3_auth.py  62
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_v3_extensions.py  194
-rw-r--r--  nova/tests/unit/api/openstack/compute/test_versions.py  797
-rw-r--r--  nova/tests/unit/api/openstack/fakes.py  662
-rw-r--r--  nova/tests/unit/api/openstack/test_common.py  764
-rw-r--r--  nova/tests/unit/api/openstack/test_faults.py  315
-rw-r--r--  nova/tests/unit/api/openstack/test_mapper.py  46
-rw-r--r--  nova/tests/unit/api/openstack/test_wsgi.py  1244
-rw-r--r--  nova/tests/unit/api/openstack/test_xmlutil.py  948
-rw-r--r--  nova/tests/unit/api/test_auth.py  214
-rw-r--r--  nova/tests/unit/api/test_compute_req_id.py  40
-rw-r--r--  nova/tests/unit/api/test_validator.py  103
-rw-r--r--  nova/tests/unit/api/test_wsgi.py  65
-rw-r--r--  nova/tests/unit/bundle/1mb.manifest.xml  1
-rw-r--r--  nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml  1
-rw-r--r--  nova/tests/unit/bundle/1mb.part.0  bin 0 -> 1024 bytes
-rw-r--r--  nova/tests/unit/bundle/1mb.part.1  1
-rw-r--r--  nova/tests/unit/cast_as_call.py  49
-rw-r--r--  nova/tests/unit/cells/__init__.py  0
-rw-r--r--  nova/tests/unit/cells/fakes.py  207
-rw-r--r--  nova/tests/unit/cells/test_cells_filters.py  173
-rw-r--r--  nova/tests/unit/cells/test_cells_manager.py  808
-rw-r--r--  nova/tests/unit/cells/test_cells_messaging.py  2129
-rw-r--r--  nova/tests/unit/cells/test_cells_rpc_driver.py  207
-rw-r--r--  nova/tests/unit/cells/test_cells_rpcapi.py  760
-rw-r--r--  nova/tests/unit/cells/test_cells_scheduler.py  530
-rw-r--r--  nova/tests/unit/cells/test_cells_state_manager.py  259
-rw-r--r--  nova/tests/unit/cells/test_cells_utils.py  103
-rw-r--r--  nova/tests/unit/cells/test_cells_weights.py  218
-rw-r--r--  nova/tests/unit/cert/__init__.py  0
-rw-r--r--  nova/tests/unit/cert/test_rpcapi.py  123
-rw-r--r--  nova/tests/unit/cmd/__init__.py  0
-rw-r--r--  nova/tests/unit/cmd/test_idmapshift.py  636
-rw-r--r--  nova/tests/unit/compute/__init__.py  0
-rw-r--r--  nova/tests/unit/compute/eventlet_utils.py  23
-rw-r--r--  nova/tests/unit/compute/fake_resource_tracker.py  35
-rw-r--r--  nova/tests/unit/compute/monitors/__init__.py  0
-rw-r--r--  nova/tests/unit/compute/monitors/test_cpu_monitor.py  86
-rw-r--r--  nova/tests/unit/compute/monitors/test_monitors.py  144
-rw-r--r--  nova/tests/unit/compute/test_arch.py  65
-rw-r--r--  nova/tests/unit/compute/test_claims.py  320
-rw-r--r--  nova/tests/unit/compute/test_compute.py  11415
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py  2635
-rw-r--r--  nova/tests/unit/compute/test_compute_cells.py  332
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py  3053
-rw-r--r--  nova/tests/unit/compute/test_compute_utils.py  827
-rw-r--r--  nova/tests/unit/compute/test_compute_xen.py  67
-rw-r--r--  nova/tests/unit/compute/test_flavors.py  61
-rw-r--r--  nova/tests/unit/compute/test_host_api.py  480
-rw-r--r--  nova/tests/unit/compute/test_hvtype.py  46
-rw-r--r--  nova/tests/unit/compute/test_keypairs.py  221
-rw-r--r--  nova/tests/unit/compute/test_multiple_nodes.py  169
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py  1539
-rw-r--r--  nova/tests/unit/compute/test_resources.py  344
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py  486
-rw-r--r--  nova/tests/unit/compute/test_shelve.py  414
-rw-r--r--  nova/tests/unit/compute/test_stats.py  222
-rw-r--r--  nova/tests/unit/compute/test_tracker.py  614
-rw-r--r--  nova/tests/unit/compute/test_virtapi.py  188
-rw-r--r--  nova/tests/unit/compute/test_vmmode.py  70
-rw-r--r--  nova/tests/unit/conductor/__init__.py  0
-rw-r--r--  nova/tests/unit/conductor/tasks/__init__.py  0
-rw-r--r--  nova/tests/unit/conductor/tasks/test_live_migrate.py  384
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py  2151
-rw-r--r--  nova/tests/unit/conf_fixture.py  64
-rw-r--r--  nova/tests/unit/console/__init__.py  0
-rw-r--r--  nova/tests/unit/console/test_console.py  186
-rw-r--r--  nova/tests/unit/console/test_rpcapi.py  76
-rw-r--r--  nova/tests/unit/console/test_serial.py  137
-rw-r--r--  nova/tests/unit/console/test_type.py  61
-rw-r--r--  nova/tests/unit/console/test_websocketproxy.py  157
-rw-r--r--  nova/tests/unit/consoleauth/__init__.py  0
-rw-r--r--  nova/tests/unit/consoleauth/test_consoleauth.py  181
-rw-r--r--  nova/tests/unit/consoleauth/test_rpcapi.py  91
-rw-r--r--  nova/tests/unit/db/__init__.py  18
-rw-r--r--  nova/tests/unit/db/fakes.py  473
-rw-r--r--  nova/tests/unit/db/test_db_api.py  7517
-rw-r--r--  nova/tests/unit/db/test_migration_utils.py  256
-rw-r--r--  nova/tests/unit/db/test_migrations.conf  26
-rw-r--r--  nova/tests/unit/db/test_migrations.py  913
-rw-r--r--  nova/tests/unit/db/test_sqlite.py  53
-rw-r--r--  nova/tests/unit/fake_block_device.py  44
-rw-r--r--  nova/tests/unit/fake_crypto.py  109
-rw-r--r--  nova/tests/unit/fake_hosts.py  37
-rw-r--r--  nova/tests/unit/fake_instance.py  107
-rw-r--r--  nova/tests/unit/fake_ldap.py  330
-rw-r--r--  nova/tests/unit/fake_loadables/__init__.py  27
-rw-r--r--  nova/tests/unit/fake_loadables/fake_loadable1.py  44
-rw-r--r--  nova/tests/unit/fake_loadables/fake_loadable2.py  39
-rw-r--r--  nova/tests/unit/fake_network.py  457
-rw-r--r--  nova/tests/unit/fake_network_cache_model.py  77
-rw-r--r--  nova/tests/unit/fake_notifier.py  69
-rw-r--r--  nova/tests/unit/fake_policy.py  412
-rw-r--r--  nova/tests/unit/fake_processutils.py  108
-rw-r--r--  nova/tests/unit/fake_server_actions.py  119
-rw-r--r--  nova/tests/unit/fake_utils.py  36
-rw-r--r--  nova/tests/unit/fake_volume.py  290
-rw-r--r--  nova/tests/unit/functional/__init__.py  0
-rw-r--r--  nova/tests/unit/image/__init__.py  0
-rw-r--r--  nova/tests/unit/image/abs.tar.gz  bin 0 -> 153 bytes
-rw-r--r--  nova/tests/unit/image/fake.py  257
-rw-r--r--  nova/tests/unit/image/rel.tar.gz  bin 0 -> 165 bytes
-rw-r--r--  nova/tests/unit/image/test_fake.py  117
-rw-r--r--  nova/tests/unit/image/test_glance.py  1231
-rw-r--r--  nova/tests/unit/image/test_s3.py  267
-rw-r--r--  nova/tests/unit/image/test_transfer_modules.py  101
-rw-r--r--  nova/tests/unit/image_fixtures.py  79
-rw-r--r--  nova/tests/unit/integrated/__init__.py  18
-rw-r--r--  nova/tests/unit/integrated/api/__init__.py  18
-rw-r--r--  nova/tests/unit/integrated/api/client.py  304
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl  34
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl  214
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl  71
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl  56
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl  17
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl  17
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl  24
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl  34
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl  219
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl  71
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl  56
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl  56
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl  59
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl  58
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl  94
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl  94
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl  11
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl  11
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl  56
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/README.rst  29
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl  716
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl  269
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl  24
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl  74
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl  56
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl  39
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl  18
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl  25
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl  74
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl  22
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl  76
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl  25
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl  18
-rw-r--r--  nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl  74
-rw-r--r--  nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl  33
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl  212
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl  71
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl  212
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl  71
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl  137
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl  38
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl  137
-rw-r--r--  nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl  38
-rw-r--r--  nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl  85
-rw-r--r--  nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl  32
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl  13
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl  11
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl  15
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl  14
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl  17
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl  15
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl  14
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl  17
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl  15
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl  15
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl  14
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl  15
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl  14
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl  13
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl  48
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl  44
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl  18
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl  17
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl  33
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl  18
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl  39
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl  0
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl  0
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl  13
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl  55
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl  57
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl  1
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl  0
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl  0
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl  25
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl  12
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl  36
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl  34
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl  37
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl  35
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl  72
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl  71
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl  15
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl  53
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl  19
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl  16
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl  19
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl  16
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl  6
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl  6
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl  3
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl  3
-rwxr-xr-x  nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl  40
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl  58
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl  21
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl  19
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl  16
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl  59
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl  7
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl  10
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl  94
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl  23
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl  4
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl  20
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl  6
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl  5
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl  3
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl  2
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl  9
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl  8
-rw-r--r--  nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl  19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl20
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl94
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl23
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl20
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl20
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl94
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl23
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl20
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl25
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl54
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl21
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl39
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl27
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl27
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl27
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl22
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl32
-rw-r--r--nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl25
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl32
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl30
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl33
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl31
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl1
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl64
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl63
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl37
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl26
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl53
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl54
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl21
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl87
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl34
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl93
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl40
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl40
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl36
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl27
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl26
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl14
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl13
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl90
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl37
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl90
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl37
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl25
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl15
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl22
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl21
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl2
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl37
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl25
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl54
-rw-r--r--nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl4
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl3
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl19
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl21
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl26
-rw-r--r--nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl9
-rw-r--r--nova/tests/unit/integrated/api_samples_test_base.py323
-rw-r--r--nova/tests/unit/integrated/integrated_helpers.py160
-rw-r--r--nova/tests/unit/integrated/test_api_samples.py4433
-rw-r--r--nova/tests/unit/integrated/test_extensions.py42
-rw-r--r--nova/tests/unit/integrated/test_login.py36
-rw-r--r--nova/tests/unit/integrated/test_servers.py522
-rw-r--r--nova/tests/unit/integrated/test_xml.py51
-rw-r--r--nova/tests/unit/integrated/v3/__init__.py0
-rw-r--r--nova/tests/unit/integrated/v3/api_sample_base.py79
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl76
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl78
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl1
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl1
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl23
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl109
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl23
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl23
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl23
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl109
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl74
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl34
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl219
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl33
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl212
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl137
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl57
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl57
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl59
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl15
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl15
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl15
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl14
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl14
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl69
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl26
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl39
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl58
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl34
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl214
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl58
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl58
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl58
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl60
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl60
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl60
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl59
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl60
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl114
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl25
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl47
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl48
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl4
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl4
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl4
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl4
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl39
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl30
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl28
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl27
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl22
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl27
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl32
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl12
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl36
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl37
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl72
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl42
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl40
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl36
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl20
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl60
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl62
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl19
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl54
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl55
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl13
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl56
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl57
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl6
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl57
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl58
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl44
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl27
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl13
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl14
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl7
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl26
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl22
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl24
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl21
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl8
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl31
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl11
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl12
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl9
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl17
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl5
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl3
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl55
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl10
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl16
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl57
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/unit/integrated/v3/test_access_ips.py93
-rw-r--r--nova/tests/unit/integrated/v3/test_admin_actions.py46
-rw-r--r--nova/tests/unit/integrated/v3/test_admin_password.py29
-rw-r--r--nova/tests/unit/integrated/v3/test_agents.py98
-rw-r--r--nova/tests/unit/integrated/v3/test_aggregates.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_attach_interfaces.py166
-rw-r--r--nova/tests/unit/integrated/v3/test_availability_zone.py49
-rw-r--r--nova/tests/unit/integrated/v3/test_cells.py107
-rw-r--r--nova/tests/unit/integrated/v3/test_certificates.py31
-rw-r--r--nova/tests/unit/integrated/v3/test_cloudpipe.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_config_drive.py48
-rw-r--r--nova/tests/unit/integrated/v3/test_console_auth_tokens.py51
-rw-r--r--nova/tests/unit/integrated/v3/test_console_output.py27
-rw-r--r--nova/tests/unit/integrated/v3/test_consoles.py55
-rw-r--r--nova/tests/unit/integrated/v3/test_create_backup.py38
-rw-r--r--nova/tests/unit/integrated/v3/test_deferred_delete.py42
-rw-r--r--nova/tests/unit/integrated/v3/test_disk_config.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_evacuate.py91
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_availability_zone.py34
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_server_attributes.py42
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_status.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_volumes.py151
-rw-r--r--nova/tests/unit/integrated/v3/test_extension_info.py71
-rw-r--r--nova/tests/unit/integrated/v3/test_fixed_ips.py109
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_access.py89
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_extraspecs.py62
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_manage.py43
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_rxtx.py46
-rw-r--r--nova/tests/unit/integrated/v3/test_flavors.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ip_dns.py91
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ip_pools.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ips_bulk.py86
-rw-r--r--nova/tests/unit/integrated/v3/test_fping.py45
-rw-r--r--nova/tests/unit/integrated/v3/test_hide_server_addresses.py39
-rw-r--r--nova/tests/unit/integrated/v3/test_hosts.py57
-rw-r--r--nova/tests/unit/integrated/v3/test_hypervisors.py69
-rw-r--r--nova/tests/unit/integrated/v3/test_image_size.py37
-rw-r--r--nova/tests/unit/integrated/v3/test_images.py85
-rw-r--r--nova/tests/unit/integrated/v3/test_instance_actions.py84
-rw-r--r--nova/tests/unit/integrated/v3/test_keypairs.py72
-rw-r--r--nova/tests/unit/integrated/v3/test_lock_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_migrate_server.py71
-rw-r--r--nova/tests/unit/integrated/v3/test_migrations.py72
-rw-r--r--nova/tests/unit/integrated/v3/test_multinic.py49
-rw-r--r--nova/tests/unit/integrated/v3/test_multiple_create.py45
-rw-r--r--nova/tests/unit/integrated/v3/test_networks.py73
-rw-r--r--nova/tests/unit/integrated/v3/test_networks_associate.py76
-rw-r--r--nova/tests/unit/integrated/v3/test_pause_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_pci.py182
-rw-r--r--nova/tests/unit/integrated/v3/test_quota_sets.py70
-rw-r--r--nova/tests/unit/integrated/v3/test_remote_consoles.py70
-rw-r--r--nova/tests/unit/integrated/v3/test_rescue.py82
-rw-r--r--nova/tests/unit/integrated/v3/test_scheduler_hints.py32
-rw-r--r--nova/tests/unit/integrated/v3/test_security_group_default_rules.py40
-rw-r--r--nova/tests/unit/integrated/v3/test_security_groups.py166
-rw-r--r--nova/tests/unit/integrated/v3/test_server_diagnostics.py27
-rw-r--r--nova/tests/unit/integrated/v3/test_server_external_events.py40
-rw-r--r--nova/tests/unit/integrated/v3/test_server_groups.py66
-rw-r--r--nova/tests/unit/integrated/v3/test_server_metadata.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_server_usage.py39
-rw-r--r--nova/tests/unit/integrated/v3/test_servers.py188
-rw-r--r--nova/tests/unit/integrated/v3/test_servers_ips.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_services.py87
-rw-r--r--nova/tests/unit/integrated/v3/test_shelve.py50
-rw-r--r--nova/tests/unit/integrated/v3/test_simple_tenant_usage.py61
-rw-r--r--nova/tests/unit/integrated/v3/test_suspend_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_tenant_networks.py61
-rw-r--r--nova/tests/unit/integrated/v3/test_used_limits.py34
-rw-r--r--nova/tests/unit/integrated/v3/test_user_data.py36
-rw-r--r--nova/tests/unit/integrated/v3/test_volumes.py184
-rw-r--r--nova/tests/unit/keymgr/__init__.py0
-rw-r--r--nova/tests/unit/keymgr/fake.py24
-rw-r--r--nova/tests/unit/keymgr/test_conf_key_mgr.py59
-rw-r--r--nova/tests/unit/keymgr/test_key.py67
-rw-r--r--nova/tests/unit/keymgr/test_key_mgr.py31
-rw-r--r--nova/tests/unit/keymgr/test_mock_key_mgr.py102
-rw-r--r--nova/tests/unit/keymgr/test_not_implemented_key_mgr.py47
-rw-r--r--nova/tests/unit/keymgr/test_single_key_mgr.py72
-rw-r--r--nova/tests/unit/matchers.py466
-rw-r--r--nova/tests/unit/monkey_patch_example/__init__.py31
-rw-r--r--nova/tests/unit/monkey_patch_example/example_a.py27
-rw-r--r--nova/tests/unit/monkey_patch_example/example_b.py28
-rw-r--r--nova/tests/unit/network/__init__.py0
-rw-r--r--nova/tests/unit/network/security_group/__init__.py0
-rw-r--r--nova/tests/unit/network/security_group/test_neutron_driver.py247
-rw-r--r--nova/tests/unit/network/test_api.py589
-rw-r--r--nova/tests/unit/network/test_linux_net.py1115
-rw-r--r--nova/tests/unit/network/test_manager.py3358
-rw-r--r--nova/tests/unit/network/test_network_info.py800
-rw-r--r--nova/tests/unit/network/test_neutronv2.py3194
-rw-r--r--nova/tests/unit/network/test_rpcapi.py353
-rw-r--r--nova/tests/unit/objects/__init__.py0
-rw-r--r--nova/tests/unit/objects/test_agent.py103
-rw-r--r--nova/tests/unit/objects/test_aggregate.py199
-rw-r--r--nova/tests/unit/objects/test_bandwidth_usage.py124
-rw-r--r--nova/tests/unit/objects/test_block_device.py333
-rw-r--r--nova/tests/unit/objects/test_compute_node.py240
-rw-r--r--nova/tests/unit/objects/test_dns_domain.py85
-rw-r--r--nova/tests/unit/objects/test_ec2.py192
-rw-r--r--nova/tests/unit/objects/test_external_event.py46
-rw-r--r--nova/tests/unit/objects/test_fields.py393
-rw-r--r--nova/tests/unit/objects/test_fixed_ip.py339
-rw-r--r--nova/tests/unit/objects/test_flavor.py253
-rw-r--r--nova/tests/unit/objects/test_floating_ip.py259
-rw-r--r--nova/tests/unit/objects/test_hv_spec.py58
-rw-r--r--nova/tests/unit/objects/test_instance.py1196
-rw-r--r--nova/tests/unit/objects/test_instance_action.py365
-rw-r--r--nova/tests/unit/objects/test_instance_fault.py126
-rw-r--r--nova/tests/unit/objects/test_instance_group.py350
-rw-r--r--nova/tests/unit/objects/test_instance_info_cache.py117
-rw-r--r--nova/tests/unit/objects/test_instance_numa_topology.py78
-rw-r--r--nova/tests/unit/objects/test_instance_pci_requests.py191
-rw-r--r--nova/tests/unit/objects/test_keypair.py109
-rw-r--r--nova/tests/unit/objects/test_migration.py184
-rw-r--r--nova/tests/unit/objects/test_network.py232
-rw-r--r--nova/tests/unit/objects/test_network_request.py102
-rw-r--r--nova/tests/unit/objects/test_objects.py1126
-rw-r--r--nova/tests/unit/objects/test_pci_device.py254
-rw-r--r--nova/tests/unit/objects/test_quotas.py167
-rw-r--r--nova/tests/unit/objects/test_security_group.py175
-rw-r--r--nova/tests/unit/objects/test_security_group_rule.py95
-rw-r--r--nova/tests/unit/objects/test_service.py226
-rw-r--r--nova/tests/unit/objects/test_virtual_interface.py126
-rw-r--r--nova/tests/unit/pci/__init__.py0
-rw-r--r--nova/tests/unit/pci/fakes.py38
-rw-r--r--nova/tests/unit/pci/test_device.py119
-rw-r--r--nova/tests/unit/pci/test_devspec.py177
-rw-r--r--nova/tests/unit/pci/test_manager.py364
-rw-r--r--nova/tests/unit/pci/test_request.py209
-rw-r--r--nova/tests/unit/pci/test_stats.py267
-rw-r--r--nova/tests/unit/pci/test_utils.py61
-rw-r--r--nova/tests/unit/pci/test_whitelist.py66
-rw-r--r--nova/tests/unit/policy_fixture.py73
-rw-r--r--nova/tests/unit/scheduler/__init__.py0
-rw-r--r--nova/tests/unit/scheduler/fakes.py268
-rw-r--r--nova/tests/unit/scheduler/filters/__init__.py0
-rw-r--r--nova/tests/unit/scheduler/filters/test_affinity_filters.py258
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py98
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py72
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py53
-rw-r--r--nova/tests/unit/scheduler/filters/test_availability_zone_filters.py48
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py99
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_filters.py50
-rw-r--r--nova/tests/unit/scheduler/filters/test_core_filters.py87
-rw-r--r--nova/tests/unit/scheduler/filters/test_disk_filters.py100
-rw-r--r--nova/tests/unit/scheduler/filters/test_extra_specs_ops.py200
-rw-r--r--nova/tests/unit/scheduler/filters/test_image_props_filters.py189
-rw-r--r--nova/tests/unit/scheduler/filters/test_io_ops_filters.py63
-rw-r--r--nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py90
-rw-r--r--nova/tests/unit/scheduler/filters/test_json_filters.py289
-rw-r--r--nova/tests/unit/scheduler/filters/test_metrics_filters.py34
-rw-r--r--nova/tests/unit/scheduler/filters/test_num_instances_filters.py63
-rw-r--r--nova/tests/unit/scheduler/filters/test_numa_topology_filters.py151
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py67
-rw-r--r--nova/tests/unit/scheduler/filters/test_ram_filters.py89
-rw-r--r--nova/tests/unit/scheduler/filters/test_retry_filters.py46
-rw-r--r--nova/tests/unit/scheduler/filters/test_trusted_filters.py203
-rw-r--r--nova/tests/unit/scheduler/filters/test_type_filters.py56
-rw-r--r--nova/tests/unit/scheduler/ironic_fakes.py75
-rw-r--r--nova/tests/unit/scheduler/test_baremetal_host_manager.py81
-rw-r--r--nova/tests/unit/scheduler/test_caching_scheduler.py199
-rw-r--r--nova/tests/unit/scheduler/test_chance_scheduler.py182
-rw-r--r--nova/tests/unit/scheduler/test_client.py113
-rw-r--r--nova/tests/unit/scheduler/test_filter_scheduler.py596
-rw-r--r--nova/tests/unit/scheduler/test_filters.py206
-rw-r--r--nova/tests/unit/scheduler/test_filters_utils.py44
-rw-r--r--nova/tests/unit/scheduler/test_host_filters.py38
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py545
-rw-r--r--nova/tests/unit/scheduler/test_ironic_host_manager.py430
-rw-r--r--nova/tests/unit/scheduler/test_rpcapi.py69
-rw-r--r--nova/tests/unit/scheduler/test_scheduler.py378
-rw-r--r--nova/tests/unit/scheduler/test_scheduler_options.py138
-rw-r--r--nova/tests/unit/scheduler/test_scheduler_utils.py314
-rw-r--r--nova/tests/unit/scheduler/test_weights.py338
-rw-r--r--nova/tests/unit/servicegroup/__init__.py0
-rw-r--r--nova/tests/unit/servicegroup/test_db_servicegroup.py144
-rw-r--r--nova/tests/unit/servicegroup/test_mc_servicegroup.py213
-rw-r--r--nova/tests/unit/servicegroup/test_zk_driver.py65
-rw-r--r--nova/tests/unit/ssl_cert/ca.crt35
-rw-r--r--nova/tests/unit/ssl_cert/certificate.crt30
-rw-r--r--nova/tests/unit/ssl_cert/privatekey.key51
-rw-r--r--nova/tests/unit/test_api_validation.py872
-rw-r--r--nova/tests/unit/test_availability_zones.py255
-rw-r--r--nova/tests/unit/test_baserpc.py50
-rw-r--r--nova/tests/unit/test_bdm.py248
-rw-r--r--nova/tests/unit/test_block_device.py604
-rw-r--r--nova/tests/unit/test_cinder.py405
-rw-r--r--nova/tests/unit/test_configdrive2.py104
-rw-r--r--nova/tests/unit/test_context.py121
-rw-r--r--nova/tests/unit/test_crypto.py256
-rw-r--r--nova/tests/unit/test_exception.py179
-rw-r--r--nova/tests/unit/test_flavors.py593
-rw-r--r--nova/tests/unit/test_hacking.py403
-rw-r--r--nova/tests/unit/test_hooks.py205
-rw-r--r--nova/tests/unit/test_instance_types_extra_specs.py142
-rw-r--r--nova/tests/unit/test_iptables_network.py277
-rw-r--r--nova/tests/unit/test_ipv6.py88
-rw-r--r--nova/tests/unit/test_linuxscsi.py134
-rw-r--r--nova/tests/unit/test_loadables.py113
-rw-r--r--nova/tests/unit/test_matchers.py349
-rw-r--r--nova/tests/unit/test_metadata.py865
-rw-r--r--nova/tests/unit/test_notifications.py394
-rw-r--r--nova/tests/unit/test_nova_manage.py467
-rw-r--r--nova/tests/unit/test_objectstore.py155
-rw-r--r--nova/tests/unit/test_pipelib.py74
-rw-r--r--nova/tests/unit/test_policy.py231
-rw-r--r--nova/tests/unit/test_quota.py2765
-rw-r--r--nova/tests/unit/test_safeutils.py98
-rw-r--r--nova/tests/unit/test_service.py370
-rw-r--r--nova/tests/unit/test_test.py60
-rw-r--r--nova/tests/unit/test_test_utils.py70
-rw-r--r--nova/tests/unit/test_utils.py981
-rw-r--r--nova/tests/unit/test_versions.py60
-rw-r--r--nova/tests/unit/test_weights.py53
-rw-r--r--nova/tests/unit/test_wsgi.py263
-rw-r--r--nova/tests/unit/utils.py217
-rw-r--r--nova/tests/unit/virt/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/mount/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/mount/test_loop.py98
-rw-r--r--nova/tests/unit/virt/disk/mount/test_nbd.py331
-rw-r--r--nova/tests/unit/virt/disk/test_api.py153
-rw-r--r--nova/tests/unit/virt/disk/test_inject.py284
-rw-r--r--nova/tests/unit/virt/disk/vfs/__init__.py0
-rw-r--r--nova/tests/unit/virt/disk/vfs/fakeguestfs.py188
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_guestfs.py264
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_localfs.py385
-rw-r--r--nova/tests/unit/virt/hyperv/__init__.py0
-rw-r--r--nova/tests/unit/virt/hyperv/db_fakes.py167
-rw-r--r--nova/tests/unit/virt/hyperv/fake.py90
-rw-r--r--nova/tests/unit/virt/hyperv/test_basevolumeutils.py157
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostutils.py97
-rw-r--r--nova/tests/unit/virt/hyperv/test_hypervapi.py1967
-rw-r--r--nova/tests/unit/virt/hyperv/test_ioutils.py61
-rw-r--r--nova/tests/unit/virt/hyperv/test_migrationops.py79
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutils.py82
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutilsv2.py45
-rw-r--r--nova/tests/unit/virt/hyperv/test_pathutils.py58
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py28
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py37
-rw-r--r--nova/tests/unit/virt/hyperv/test_utilsfactory.py57
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutils.py161
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutilsv2.py249
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py230
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutils.py668
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutilsv2.py197
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutils.py151
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutilsv2.py147
-rw-r--r--nova/tests/unit/virt/ironic/__init__.py0
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py126
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py1268
-rw-r--r--nova/tests/unit/virt/ironic/test_patcher.py139
-rw-r--r--nova/tests/unit/virt/ironic/utils.py115
-rw-r--r--nova/tests/unit/virt/libvirt/__init__.py0
-rw-r--r--nova/tests/unit/virt/libvirt/fake_imagebackend.py75
-rw-r--r--nova/tests/unit/virt/libvirt/fake_libvirt_utils.py211
-rw-r--r--nova/tests/unit/virt/libvirt/fakelibvirt.py1108
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py991
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py2344
-rw-r--r--nova/tests/unit/virt/libvirt/test_designer.py30
-rw-r--r--nova/tests/unit/virt/libvirt/test_dmcrypt.py72
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py12576
-rw-r--r--nova/tests/unit/virt/libvirt/test_fakelibvirt.py386
-rw-r--r--nova/tests/unit/virt/libvirt/test_firewall.py749
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py1309
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py887
-rw-r--r--nova/tests/unit/virt/libvirt/test_lvm.py183
-rw-r--r--nova/tests/unit/virt/libvirt/test_rbd.py283
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py652
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py959
-rw-r--r--nova/tests/unit/virt/libvirt/test_volume.py1160
-rw-r--r--nova/tests/unit/virt/test_block_device.py684
-rw-r--r--nova/tests/unit/virt/test_configdrive.py30
-rw-r--r--nova/tests/unit/virt/test_diagnostics.py231
-rw-r--r--nova/tests/unit/virt/test_driver.py58
-rw-r--r--nova/tests/unit/virt/test_events.py36
-rw-r--r--nova/tests/unit/virt/test_hardware.py1439
-rw-r--r--nova/tests/unit/virt/test_imagecache.py122
-rw-r--r--nova/tests/unit/virt/test_images.py45
-rw-r--r--nova/tests/unit/virt/test_virt.py287
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py881
-rw-r--r--nova/tests/unit/virt/test_volumeutils.py47
-rw-r--r--nova/tests/unit/virt/vmwareapi/__init__.py0
-rw-r--r--nova/tests/unit/virt/vmwareapi/fake.py1606
-rw-r--r--nova/tests/unit/virt/vmwareapi/stubs.py131
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py168
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py2650
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py548
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py163
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_imagecache.py277
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py216
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_io_util.py33
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_read_write_util.py39
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py346
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vim_util.py117
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py1069
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py1293
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py95
-rw-r--r--nova/tests/unit/virt/xenapi/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/client/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_objects.py113
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_session.py158
-rw-r--r--nova/tests/unit/virt/xenapi/image/__init__.py0
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_bittorrent.py163
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_glance.py256
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_utils.py252
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py182
-rw-r--r--nova/tests/unit/virt/xenapi/stubs.py365
-rw-r--r--nova/tests/unit/virt/xenapi/test_agent.py468
-rw-r--r--nova/tests/unit/virt/xenapi/test_driver.py101
-rw-r--r--nova/tests/unit/virt/xenapi/test_network_utils.py76
-rw-r--r--nova/tests/unit/virt/xenapi/test_vm_utils.py2422
-rw-r--r--nova/tests/unit/virt/xenapi/test_vmops.py1124
-rw-r--r--nova/tests/unit/virt/xenapi/test_volume_utils.py232
-rw-r--r--nova/tests/unit/virt/xenapi/test_volumeops.py549
-rw-r--r--nova/tests/unit/virt/xenapi/test_xenapi.py4105
-rw-r--r--nova/tests/unit/virt/xenapi/vm_rrd.xml1101
-rw-r--r--nova/tests/unit/volume/__init__.py0
-rw-r--r--nova/tests/unit/volume/encryptors/__init__.py0
-rw-r--r--nova/tests/unit/volume/encryptors/test_base.py54
-rw-r--r--nova/tests/unit/volume/encryptors/test_cryptsetup.py83
-rw-r--r--nova/tests/unit/volume/encryptors/test_luks.py71
-rw-r--r--nova/tests/unit/volume/encryptors/test_nop.py28
-rw-r--r--nova/tests/unit/volume/test_cinder.py451
1826 files changed, 232767 insertions, 0 deletions
diff --git a/nova/tests/unit/CA/cacert.pem b/nova/tests/unit/CA/cacert.pem
new file mode 100644
index 0000000000..9ffb5bb807
--- /dev/null
+++ b/nova/tests/unit/CA/cacert.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICyzCCAjSgAwIBAgIJANiqHZUcbScCMA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV
+BAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK
+Q2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTAwNTI4MDExOTI1WhcNMTEwNTI4
+MDExOTI1WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu
+IFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG
+SIb3DQEBAQUAA4GNADCBiQKBgQDobUnq8rpXA/HQZ2Uu9Me3SlqCayz3ws2wtvFQ
+koWPUzpriIYPkpprz2EaVu07Zb9uJHvjcoY07nYntl4jR8S7PH4XZhlVFn8AQWzs
+iThU4KJF71UfVM00dDrarSgVpyOIcFXO3iUvLoJj7+RUPjrWdLuJoMqnhicgLeHZ
+LAZ8ewIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFMh1RMlTVtt8
+EdESYpsTU08r0FnpMH4GA1UdIwR3MHWAFMh1RMlTVtt8EdESYpsTU08r0FnpoVKk
+UDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx
+EzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkA2KodlRxtJwIwDQYJ
+KoZIhvcNAQEEBQADgYEAq+YCgflK36HCdodNu2ya3O6UDRUE2dW8n96tAOmvHqmR
+v38k8GIW0pjWDo+lZYnFmeJYd+QGcJl9fLzXxffV5k+rNCfr/gEYtznWLNUX7AZB
+b/VC7L+yK9qz08C8n51TslXaf3fUGkfkQxsvEP7+hi0qavdd/8eTbdheWahYwWg=
+-----END CERTIFICATE-----
diff --git a/nova/tests/unit/CA/private/cakey.pem b/nova/tests/unit/CA/private/cakey.pem
new file mode 100644
index 0000000000..eee54cc387
--- /dev/null
+++ b/nova/tests/unit/CA/private/cakey.pem
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDobUnq8rpXA/HQZ2Uu9Me3SlqCayz3ws2wtvFQkoWPUzpriIYP
+kpprz2EaVu07Zb9uJHvjcoY07nYntl4jR8S7PH4XZhlVFn8AQWzsiThU4KJF71Uf
+VM00dDrarSgVpyOIcFXO3iUvLoJj7+RUPjrWdLuJoMqnhicgLeHZLAZ8ewIDAQAB
+AoGBANQonmZ2Nh2jniFrn/LiwULP/ho6Fov6J6N8+n1focaYZCUwM58XZRmv7KUM
+X/PuBnVVnDibm2HJodTSJM/zfODnGO15kdmJ9X23FkkdTyuvphO5tYF0ONARXdfX
+9LbPcLYA14VSCZCKCye6mbv/xi0C/s7q6ZBoMl7XaeD9hgUxAkEA9lxQY/ZxcLV0
+Ae5I2spBbtuXEGns11YnKnppc59RrAono1gaDeYY2WZRwztIcD6VtUv7qkzH6ubo
+shAG4fvnPQJBAPGFaDODs2ckPvxnILEbjpnZXGQqDCpQ3sVJ6nfu+qdAWS92ESNo
+Y6DC8zFjFaQFbKy6Jxr1VsvYDXhF8cmy7hcCQHkLElSLGWGPRdhNA268QTn+mlJu
+OPf0VHoCex1cAfzNYHxZJTP/AeaO501NK2I63cOd+aDK6M75dQtH5JnT8uECQQCg
+jVydkhk6oV+1jiCvW3BKWbIPa9w2bRgJ8n8JRzYc5Kvk3wm5jfVcsvvTgtip9mkt
+0XmZdCpEy9T4dRasTGP1AkBMhShiVP7+P+SIQlZtSn8ckTt9G6cefEjxsv0kVFZe
+SjkUO0ZifahF8r3Q1eEUSzdXEvicEwONvcpc7MLwfSD7
+-----END RSA PRIVATE KEY-----
diff --git a/nova/tests/unit/README.rst b/nova/tests/unit/README.rst
new file mode 100644
index 0000000000..8ac999c740
--- /dev/null
+++ b/nova/tests/unit/README.rst
@@ -0,0 +1,95 @@
+=====================================
+OpenStack Nova Testing Infrastructure
+=====================================
+
+This README file attempts to provide current and prospective contributors with
+everything they need to know in order to start creating unit tests for nova.
+
+Note: the content for the rest of this file will be added as the work items in
+the following blueprint are completed:
+ https://blueprints.launchpad.net/nova/+spec/consolidate-testing-infrastructure
+
+
+Test Types: Unit vs. Functional vs. Integration
+-----------------------------------------------
+
+TBD
+
+Writing Unit Tests
+------------------
+
+TBD
+
+Using Fakes
+~~~~~~~~~~~
+
+TBD
+
+test.TestCase
+-------------
+The TestCase class from nova.test (generally imported as test)
+automatically manages self.stubs using the stubout module and self.mox
+using the mox module during the setUp step; both are automatically
+verified and cleaned up during the tearDown step.
+
+If you use test.TestCase, you must call the super class setUp. If you
+override tearDown, the super class tearDown must be the last call in
+your override.
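+
+A minimal sketch of that pattern (``ExampleTestCase`` is an invented name
+used here for illustration, not an existing nova test)::
+
+    from nova import test
+
+
+    class ExampleTestCase(test.TestCase):
+        def setUp(self):
+            # The parent setUp must run first; it creates self.stubs
+            # and self.mox for this test.
+            super(ExampleTestCase, self).setUp()
+            self.subject = 'state the test methods rely on'
+
+        def tearDown(self):
+            # Test-specific cleanup goes here, and the parent tearDown
+            # goes last; it verifies mox and unsets the stubs.
+            super(ExampleTestCase, self).tearDown()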
+
+Writing Functional Tests
+------------------------
+
+TBD
+
+Writing Integration Tests
+-------------------------
+
+TBD
+
+Tests and Exceptions
+--------------------
+A properly written test asserts that particular behavior occurs. This can
+be a success condition or a failure condition, including an exception.
+When asserting that a particular exception is raised, the most specific
+exception possible should be used.
+
+In particular, testing for Exception being raised is almost always a
+mistake since it will match (almost) every exception, even those
+unrelated to the exception intended to be tested.
+
+This applies to catching exceptions manually with a try/except block,
+or using assertRaises().
+
+Example::
+
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ elevated, instance_uuid)
+
+If a stubbed function/method needs a generic exception for testing
+purposes, test.TestingException is available.
+
+Example::
+
+ def stubbed_method(self):
+ raise test.TestingException()
+ self.stubs.Set(cls, 'inner_method', stubbed_method)
+
+ obj = cls()
+ self.assertRaises(test.TestingException, obj.outer_method)
+
+
+Stubbing and Mocking
+--------------------
+
+Whenever possible, tests SHOULD NOT stub and mock out the same function.
+
+If it's unavoidable, tests SHOULD define stubs before mocks since the
+`TestCase` cleanup routine will un-mock before un-stubbing. Doing otherwise
+results in a test that leaks stubbed functions, causing hard-to-debug
+interference between tests [1]_.
+
+If a mock must take place before a stub, any stubs after the mock call MUST be
+manually unset using `self.cleanUp` calls within the test.
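+
+The recommended ordering (stub first, then mock) looks roughly like this
+(a sketch only; ``cls``, ``inner_method`` and ``helper`` are placeholders
+rather than real nova code)::
+
+    import mock
+
+    def fake_inner_method(self):
+        return 'fake result'
+
+    # Define the stub first ...
+    self.stubs.Set(cls, 'inner_method', fake_inner_method)
+    # ... and only then the mock, so the automatic cleanup order
+    # (un-mock, then un-stub) leaves nothing behind.
+    with mock.patch.object(cls, 'helper') as mock_helper:
+        cls().outer_method()
+        self.assertTrue(mock_helper.called)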
+
+
+.. [1] https://bugs.launchpad.net/nova/+bug/1180671
diff --git a/nova/tests/unit/__init__.py b/nova/tests/unit/__init__.py
new file mode 100644
index 0000000000..31822c1516
--- /dev/null
+++ b/nova/tests/unit/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`nova.tests.unit` -- Nova Unittests
+=====================================================
+
+.. automodule:: nova.tests.unit
+ :platform: Unix
+"""
+
+# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
+import os
+import sys
+import traceback
+
+
+# NOTE(mikal): All of this is because if dnspython is present in your
+# environment then eventlet monkeypatches socket.getaddrinfo() with an
+# implementation which doesn't work for IPv6. What we're checking here is
+# that the magic environment variable was set when the import happened.
+# NOTE(dims): Prevent this code from kicking in under docs generation
+# as it leads to spurious errors/warning.
+stack = traceback.extract_stack()
+if ('eventlet' in sys.modules and
+ os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and
+ (len(stack) < 2 or 'sphinx' not in stack[-2][0])):
+ raise ImportError('eventlet imported before nova/cmd/__init__ '
+ '(env var set to %s)'
+ % os.environ.get('EVENTLET_NO_GREENDNS'))
+
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+import eventlet
+
+eventlet.monkey_patch(os=False)
diff --git a/nova/tests/unit/api/__init__.py b/nova/tests/unit/api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/__init__.py
diff --git a/nova/tests/unit/api/ec2/__init__.py b/nova/tests/unit/api/ec2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/ec2/__init__.py
diff --git a/nova/tests/unit/api/ec2/public_key/dummy.fingerprint b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
new file mode 100644
index 0000000000..715bca27a2
--- /dev/null
+++ b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
@@ -0,0 +1 @@
+1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
diff --git a/nova/tests/unit/api/ec2/public_key/dummy.pub b/nova/tests/unit/api/ec2/public_key/dummy.pub
new file mode 100644
index 0000000000..d4cf2bc0d8
--- /dev/null
+++ b/nova/tests/unit/api/ec2/public_key/dummy.pub
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
diff --git a/nova/tests/unit/api/ec2/test_api.py b/nova/tests/unit/api/ec2/test_api.py
new file mode 100644
index 0000000000..cc4a2adb75
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_api.py
@@ -0,0 +1,635 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the API endpoint."""
+
+import random
+import re
+import StringIO
+
+import boto
+import boto.connection
+from boto.ec2 import regioninfo
+from boto import exception as boto_exc
+# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
+if hasattr(boto.connection, 'HTTPResponse'):
+ httplib = boto.connection
+else:
+ import httplib
+import fixtures
+import webob
+
+from nova.api import auth
+from nova.api import ec2
+from nova.api.ec2 import ec2utils
+from nova import block_device
+from nova import context
+from nova import exception
+from nova.openstack.common import versionutils
+from nova import test
+from nova.tests.unit import matchers
+
+
+class FakeHttplibSocket(object):
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
+ def __init__(self, response_string):
+ self.response_string = response_string
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """A fake httplib.HTTPConnection for boto to use
+
+    Requests made via this connection actually get translated and routed into
+    our WSGI app; we then wait for the response and turn it back into
+    the HTTPResponse that boto expects.
+ """
+ def __init__(self, app, host, is_secure=False):
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, data, headers):
+ req = webob.Request.blank(path)
+ req.method = method
+ req.body = data
+ req.headers = headers
+ req.headers['Accept'] = 'text/html'
+ req.host = self.host
+ # Call the WSGI app, get the HTTP response
+ resp = str(req.get_response(self.app))
+ # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
+ # guess that's a function the web server usually provides.
+ resp = "HTTP/1.0 %s" % resp
+ self.sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(self.sock)
+ # NOTE(vish): boto is accessing private variables for some reason
+ self._HTTPConnection__response = self.http_response
+ self.http_response.begin()
+
+ def getresponse(self):
+ return self.http_response
+
+ def getresponsebody(self):
+ return self.sock.response_string
+
+ def close(self):
+ """Required for compatibility with boto/tornado."""
+ pass
+
+
+class XmlConversionTestCase(test.NoDBTestCase):
+ """Unit test api xml conversion."""
+ def test_number_conversion(self):
+ conv = ec2utils._try_convert
+ self.assertIsNone(conv('None'))
+ self.assertEqual(conv('True'), True)
+ self.assertEqual(conv('TRUE'), True)
+ self.assertEqual(conv('true'), True)
+ self.assertEqual(conv('False'), False)
+ self.assertEqual(conv('FALSE'), False)
+ self.assertEqual(conv('false'), False)
+ self.assertEqual(conv('0'), 0)
+ self.assertEqual(conv('42'), 42)
+ self.assertEqual(conv('3.14'), 3.14)
+ self.assertEqual(conv('-57.12'), -57.12)
+ self.assertEqual(conv('0x57'), 0x57)
+ self.assertEqual(conv('-0x57'), -0x57)
+ self.assertEqual(conv('-'), '-')
+ self.assertEqual(conv('-0'), 0)
+ self.assertEqual(conv('0.0'), 0.0)
+ self.assertEqual(conv('1e-8'), 0.0)
+ self.assertEqual(conv('-1e-8'), 0.0)
+ self.assertEqual(conv('0xDD8G'), '0xDD8G')
+ self.assertEqual(conv('0XDD8G'), '0XDD8G')
+ self.assertEqual(conv('-stringy'), '-stringy')
+ self.assertEqual(conv('stringy'), 'stringy')
+ self.assertEqual(conv('add'), 'add')
+ self.assertEqual(conv('remove'), 'remove')
+ self.assertEqual(conv(''), '')
+
+
+class Ec2utilsTestCase(test.NoDBTestCase):
+ def test_ec2_id_to_id(self):
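+ # EC2 ids embed the integer id as zero-padded hex: 30 == 0x1e maps to
+ # 'i-0000001e', 29 == 0x1d to 'ami-1d', and so on.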
+ self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
+ self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
+ self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
+ self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
+
+ def test_bad_ec2_id(self):
+ self.assertRaises(exception.InvalidEc2Id,
+ ec2utils.ec2_id_to_id,
+ 'badone')
+
+ def test_id_to_ec2_id(self):
+ self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
+ self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
+ self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
+ self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
+
+ def test_dict_from_dotted_str(self):
+ in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
+ ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
+ ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
+ ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
+ ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
+ ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
+ expected_dict = {
+ 'block_device_mapping': {
+ '1': {'device_name': '/dev/sda1',
+ 'ebs': {'snapshot_id': 'snap-0000001c',
+ 'volume_size': 80,
+ 'delete_on_termination': False}},
+ '2': {'device_name': '/dev/sdc',
+ 'virtual_name': 'ephemeral0'}}}
+ out_dict = ec2utils.dict_from_dotted_str(in_str)
+
+ self.assertThat(out_dict, matchers.DictMatches(expected_dict))
+
+ def test_properties_root_device_name(self):
+ mappings = [{"device": "/dev/sda1", "virtual": "root"}]
+ properties0 = {'mappings': mappings}
+ properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
+
+ root_device_name = block_device.properties_root_device_name(
+ properties0)
+ self.assertEqual(root_device_name, '/dev/sda1')
+
+ root_device_name = block_device.properties_root_device_name(
+ properties1)
+ self.assertEqual(root_device_name, '/dev/sdb')
+
+ def test_regex_from_ec2_regex(self):
+ def _test_re(ec2_regex, expected, literal, match=True):
+ regex = ec2utils.regex_from_ec2_regex(ec2_regex)
+ self.assertEqual(regex, expected)
+ if match:
+ self.assertIsNotNone(re.match(regex, literal))
+ else:
+ self.assertIsNone(re.match(regex, literal))
+
+ # wildcards
+ _test_re('foo', '\Afoo\Z(?s)', 'foo')
+ _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo  bar', match=False)
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
+
+ # backslashes and escaped wildcards
+ _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
+
+ # analog to the example given in the EC2 API docs
+ ec2_regex = '\*nova\?\\end'
+ expected = r'\A[*]nova[?]\\end\Z(?s)'
+ literal = r'*nova?\end'
+ _test_re(ec2_regex, expected, literal)
+
+ def test_mapping_prepend_dev(self):
+ mappings = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': 'sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': 'sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ expected_result = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': '/dev/sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': '/dev/sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ self.assertThat(block_device.mappings_prepend_dev(mappings),
+ matchers.DictListMatches(expected_result))
+
+
+class ApiEc2TestCase(test.TestCase):
+ """Unit test for the cloud controller on an EC2 API."""
+ def setUp(self):
+ super(ApiEc2TestCase, self).setUp()
+ self.host = '127.0.0.1'
+ # NOTE(vish): skipping the Authorizer
+ roles = ['sysadmin', 'netadmin']
+ ctxt = context.RequestContext('fake', 'fake', roles=roles)
+ self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
+ ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
+ ), 'nova.api.ec2.cloud.CloudController'))))
+ self.useFixture(fixtures.FakeLogger('boto'))
+
+ def expect_http(self, host=None, is_secure=False, api_version=None):
+ """Returns a new EC2 connection."""
+ self.ec2 = boto.connect_ec2(
+ aws_access_key_id='fake',
+ aws_secret_access_key='fake',
+ is_secure=False,
+ region=regioninfo.RegionInfo(None, 'test', self.host),
+ port=8773,
+ path='/services/Cloud')
+ if api_version:
+ self.ec2.APIVersion = api_version
+
+ self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
+ self.http = FakeHttplibConnection(
+ self.app, '%s:8773' % (self.host), False)
+ # pylint: disable=E1103
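+ # boto's new_http_connection() signature changed across releases:
+ # >= 2.14 expects (host, port, is_secure), earlier 2.x expects
+ # ('host:port', is_secure), and older releases expect (host, is_secure),
+ # so stub out whichever variant matches the installed boto.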
+ if versionutils.is_compatible('2.14', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or self.host, 8773,
+ is_secure).AndReturn(self.http)
+ elif versionutils.is_compatible('2', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or '%s:8773' % (self.host),
+ is_secure).AndReturn(self.http)
+ else:
+ self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
+ return self.http
+
+ def test_xmlns_version_matches_request_version(self):
+ self.expect_http(api_version='2010-10-30')
+ self.mox.ReplayAll()
+
+ # Any request should be fine
+ self.ec2.get_all_instances()
+ self.assertIn(self.ec2.APIVersion, self.http.getresponsebody(),
+ 'The version in the xmlns of the response does '
+ 'not match the API version given in the request.')
+
+ def test_describe_instances(self):
+ """Test that, after creating a user and a project, the describe
+ instances call to the API works properly.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertEqual(self.ec2.get_all_instances(), [])
+
+ def test_terminate_invalid_instance(self):
+ # Attempt to terminate an invalid instance.
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.terminate_instances, "i-00000005")
+
+ def test_get_all_key_pairs(self):
+ """Test that, after creating a user and project and generating
+ a key pair, that the API call to list key pairs works properly.
+ """
+ keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair(keyname)
+ rv = self.ec2.get_all_key_pairs()
+ results = [k for k in rv if k.name == keyname]
+ self.assertEqual(len(results), 1)
+
+ def test_create_duplicate_key_pair(self):
+ """Test that, after successfully generating a keypair,
+ requesting a second keypair with the same name fails sanely.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair('test')
+
+ try:
+ self.ec2.create_key_pair('test')
+ except boto_exc.EC2ResponseError as e:
+ self.assertEqual('InvalidKeyPair.Duplicate', e.code)
+ else:
+ self.fail('Exception not raised.')
+
+ def test_get_all_security_groups(self):
+ # Test that we can retrieve security groups.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_create_delete_security_group(self):
+ # Test that we can create a security group.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEqual(len(rv), 2)
+ self.assertIn(security_group_name, [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ def test_group_name_valid_chars_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ The EC2 API spec states we should only accept alphanumeric characters,
+ spaces, dashes, and underscores. Amazon's implementation accepts more
+ characters, so anything in [:print:] is OK.
+ """
+ bad_strict_ec2 = "aa \t\x01\x02\x7f"
+ bad_amazon_ec2 = "aa #^% -=99"
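+ # Each tuple below is (ec2_strict_validation flag, group name,
+ # group description).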
+ test_raise = [
+ (True, bad_amazon_ec2, "test desc"),
+ (True, "test name", bad_amazon_ec2),
+ (False, bad_strict_ec2, "test desc"),
+ ]
+ for t in test_raise:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ t[1],
+ t[2])
+ test_accept = [
+ (False, bad_amazon_ec2, "test desc"),
+ (False, "test name", bad_amazon_ec2),
+ ]
+ for t in test_accept:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.ec2.create_security_group(t[1], t[2])
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.delete_security_group(t[1])
+
+ def test_group_name_valid_length_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ The API spec states that the name must not exceed 255 characters.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ # Verify that a group_name longer than 255 characters is rejected.
+ security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
+ for x in range(random.randint(256, 266)))
+
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ security_group_name,
+ 'test group')
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+ group.authorize('icmp', -1, -1, '0.0.0.0/0')
+ group.authorize('udp', 80, 81, '0.0.0.0/0')
+ group.authorize('tcp', 1, 65535, '0.0.0.0/0')
+ group.authorize('udp', 1, 65535, '0.0.0.0/0')
+ group.authorize('icmp', 1, 0, '0.0.0.0/0')
+ group.authorize('icmp', 0, 1, '0.0.0.0/0')
+ group.authorize('icmp', 0, 0, '0.0.0.0/0')
+
+ def _assert(message, *args):
+ try:
+ group.authorize(*args)
+ except boto_exc.EC2ResponseError as e:
+ self.assertEqual(e.status, 400, 'Expected status to be 400')
+ self.assertIn(message, e.error_message)
+ else:
+ raise self.failureException('EC2ResponseError not raised')
+
+ # Invalid CIDR address
+ _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
+ # Missing ports
+ _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
+ # from port cannot be greater than to port
+ _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
+ # For tcp, negative values are not allowed
+ _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
+ # For tcp, valid port range 1-65535
+ _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
+ # Invalid Cidr for ICMP type
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
+ # Invalid protocol
+ _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
+ # Invalid port
+ _assert('Invalid input received: To and From ports must be integers',
+ 'tcp', " ", "81", '0.0.0.0/0')
+ # Invalid icmp port
+ _assert('Invalid input received: '
+ 'Type and Code must be integers for ICMP protocol type',
+ 'icmp', " ", "81", '0.0.0.0/0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
+ # Invalid Cidr ports
+ _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+
+ self.assertEqual(len(group.rules), 8)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+ group.revoke('icmp', -1, -1, '0.0.0.0/0')
+ group.revoke('udp', 80, 81, '0.0.0.0/0')
+ group.revoke('tcp', 1, 65535, '0.0.0.0/0')
+ group.revoke('udp', 1, 65535, '0.0.0.0/0')
+ group.revoke('icmp', 1, 0, '0.0.0.0/0')
+ group.revoke('icmp', 0, 1, '0.0.0.0/0')
+ group.revoke('icmp', 0, 0, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_cidr_v6(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group for IPv6
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+ self.assertEqual(len(group.rules), 1)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """Test that we can grant and revoke another security group access
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rand_string = 'sdiuisudfsdcnpaqwertasd'
+ security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ # I don't bother checking that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEqual(len(group.rules), 3)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]),
+ '%s-%s' % (other_security_group_name, 'fake'))
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+ self.ec2.delete_security_group(other_security_group_name)
diff --git a/nova/tests/unit/api/ec2/test_apirequest.py b/nova/tests/unit/api/ec2/test_apirequest.py
new file mode 100644
index 0000000000..4b2dee96f8
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_apirequest.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the API Request internals."""
+
+import copy
+
+from oslo.utils import timeutils
+
+from nova.api.ec2 import apirequest
+from nova import test
+
+
+class APIRequestTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(APIRequestTestCase, self).setUp()
+ self.req = apirequest.APIRequest("FakeController", "FakeAction",
+ "FakeVersion", {})
+ self.resp = {
+ 'string': 'foo',
+ 'int': 1,
+ 'long': long(1),
+ 'bool': False,
+ 'dict': {
+ 'string': 'foo',
+ 'int': 1,
+ }
+ }
+
+ # The resp dict above will produce output that looks like the
+ # following (excusing line wrap for 80 cols):
+ #
+ # <FakeActionResponse xmlns="http://ec2.amazonaws.com/doc/\
+ # FakeVersion/">
+ # <requestId>uuid</requestId>
+ # <int>1</int>
+ # <dict>
+ # <int>1</int>
+ # <string>foo</string>
+ # </dict>
+ # <bool>false</bool>
+ # <string>foo</string>
+ # </FakeActionResponse>
+ #
+ # We never attempt to test the full document because hash seed
+ # order might impact its rendering order. The fact that running
+ # the function doesn't explode is a big part of the win.
+
+ def test_render_response_ascii(self):
+ data = self.req._render_response(self.resp, 'uuid')
+ self.assertIn('<FakeActionResponse xmlns="http://ec2.amazonaws.com/'
+ 'doc/FakeVersion/', data)
+ self.assertIn('<int>1</int>', data)
+ self.assertIn('<string>foo</string>', data)
+
+ def test_render_response_utf8(self):
+ resp = copy.deepcopy(self.resp)
+ resp['utf8'] = unichr(40960) + u'abcd' + unichr(1972)
+ data = self.req._render_response(resp, 'uuid')
+ self.assertIn('<utf8>&#40960;abcd&#1972;</utf8>', data)
+
+ # Tests for individual data element format functions
+
+ def test_return_valid_isoformat(self):
+ """Ensure that the ec2 api returns datetime in xs:dateTime
+ (which apparently isn't datetime.isoformat())
+ NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
+ """
+ conv = apirequest._database_to_isoformat
+ # sqlite database representation with microseconds
+ time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
+ "%Y-%m-%d %H:%M:%S.%f")
+ self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
+ # mysql database representation (no microseconds)
+ time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
+ "%Y-%m-%d %H:%M:%S")
+ self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
diff --git a/nova/tests/unit/api/ec2/test_cinder_cloud.py b/nova/tests/unit/api/ec2/test_cinder_cloud.py
new file mode 100644
index 0000000000..78db126aee
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cinder_cloud.py
@@ -0,0 +1,1096 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import fixtures
+from oslo.config import cfg
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+def get_fake_cache():
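+ """Build a fake network info cache with fixed and floating IPs."""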
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip('192.168.0.3',
+ floats=['1.2.3.4',
+ '5.6.7.8']),
+ _ip('192.168.0.4')]}]}}]
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+ return info
+
+
+def get_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+ if isinstance(instances, list):
+ for instance in instances:
+ instance['info_cache'] = {'network_info': get_fake_cache()}
+ else:
+ instances['info_cache'] = {'network_info': get_fake_cache()}
+ return instances
+
+
+class CinderCloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CinderCloudTestCase, self).setUp()
+ ec2utils.reset_cache()
+ self.useFixture(fixtures.TempDir()).path
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+ self.volume_api.reset_fake_api(self.context)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CinderCloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def _stub_instance_get_with_fixed_ips(self, func_name):
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_volumes(self):
+ # Makes sure describe_volumes works and filters results.
+
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-1',
+ description='test volume 1')
+ self.assertEqual(vol1['status'], 'available')
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-2',
+ description='test volume 2')
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ result = self.cloud.describe_volumes(self.context,
+ [vol1['volumeId']])
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
+
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self.cloud.delete_volume(self.context, vol2['volumeId'])
+
+ def test_format_volume_maps_status(self):
+ fake_volume = {'id': 1,
+ 'status': 'creating',
+ 'availability_zone': 'nova',
+ 'volumeId': 'vol-0000000a',
+ 'attachmentSet': [{}],
+ 'snapshotId': None,
+ 'created_at': '2013-04-18T06:03:35.025626',
+ 'size': 1,
+ 'mountpoint': None,
+ 'attach_status': None}
+
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'creating')
+
+ fake_volume['status'] = 'attaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'detaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'banana'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'banana')
+
+ def test_create_volume_in_availability_zone(self):
+ """Makes sure create_volume works when we specify an availability
+ zone
+ """
+ availability_zone = 'zone1:host1'
+
+ result = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ volume_id = result['volumeId']
+ availabilityZone = result['availabilityZone']
+ self.assertEqual(availabilityZone, availability_zone)
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
+ self.assertEqual(result['volumeSet'][0]['availabilityZone'],
+ availabilityZone)
+
+ self.cloud.delete_volume(self.context, volume_id)
+
+ def test_create_volume_from_snapshot(self):
+ # Makes sure create_volume works when we specify a snapshot.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s'
+ % vol1['volumeId'])
+
+ vol2 = self.cloud.create_volume(self.context,
+ snapshot_id=snap['snapshotId'])
+ volume1_id = vol1['volumeId']
+ volume2_id = vol2['volumeId']
+
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
+
+ self.cloud.delete_volume(self.context, volume2_id)
+ self.cloud.delete_snapshot(self.context, snap['snapshotId'])
+ self.cloud.delete_volume(self.context, volume1_id)
+
+ def test_volume_status_of_attaching_volume(self):
+ """Test the volume's status in response when attaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+ resp = self.cloud.attach_volume(self.context,
+ vol1['volumeId'],
+ ec2_instance_id,
+ '/dev/sde')
+ # Here, the status should be 'attaching', but it can be 'attached'
+ # in the unittest scenario if the attach action is very fast.
+ self.assertIn(resp['status'], ('attaching', 'attached'))
+
+ def test_volume_status_of_detaching_volume(self):
+ """Test the volume's status in response when detaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ self._run_instance(**kwargs)
+ resp = self.cloud.detach_volume(self.context,
+ vol1['volumeId'])
+
+ # Here, the status should be 'detaching', but it can be 'detached'
+ # in the unittest scenario if the detach action is very fast.
+ self.assertIn(resp['status'], ('detaching', 'detached'))
+
+ def test_describe_snapshots(self):
+ # Makes sure describe_snapshots works and filters results.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap2 of vol %s' %
+ vol1['volumeId'])
+
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 2)
+ result = self.cloud.describe_snapshots(
+ self.context,
+ snapshot_id=[snap2['snapshotId']])
+ self.assertEqual(len(result['snapshotSet']), 1)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_format_snapshot_maps_status(self):
+ fake_snapshot = {'status': 'new',
+ 'id': 1,
+ 'volume_id': 1,
+ 'created_at': 1353560191.08117,
+ 'progress': 90,
+ 'project_id': str(uuid.uuid4()),
+ 'volume_size': 10000,
+ 'display_description': 'desc'}
+
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'creating'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'available'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'active'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'deleting'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'deleted'
+ self.assertIsNone(self.cloud._format_snapshot(self.context,
+ fake_snapshot))
+
+ fake_snapshot['status'] = 'error'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'error')
+
+ fake_snapshot['status'] = 'banana'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'banana')
+
+ def test_create_snapshot(self):
+ # Makes sure create_snapshot works.
+ availability_zone = 'zone1:host1'
+ result = self.cloud.describe_snapshots(self.context)
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 1)
+ self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_delete_snapshot(self):
+ # Makes sure delete_snapshot works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.delete_snapshot(self.context,
+ snapshot_id=snapshot_id)
+ self.assertTrue(result)
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def _block_device_mapping_create(self, instance_uuid, mappings):
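+ """Create BDM records and matching fake volumes, then attach them."""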
+ volumes = []
+ for bdm in mappings:
+ db.block_device_mapping_create(self.context, bdm)
+ if 'volume_id' in bdm:
+ values = {'id': bdm['volume_id']}
+ for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
+ ('snapshot_size', 'volume_size'),
+ ('delete_on_termination',
+ 'delete_on_termination')]:
+ if bdm_key in bdm:
+ values[vol_key] = bdm[bdm_key]
+ kwargs = {'name': 'bdmtest-volume',
+ 'description': 'bdm test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached',
+ 'volume_id': values['id']}
+ vol = self.volume_api.create_with_kwargs(self.context,
+ **kwargs)
+ if 'snapshot_id' in values:
+ self.volume_api.create_snapshot(self.context,
+ vol['id'],
+ 'snapshot-bdm',
+ 'fake snap for bdm tests',
+ values['snapshot_id'])
+
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, bdm['device_name'])
+ volumes.append(vol)
+ return volumes
+
+ def _setUpBlockDeviceMapping(self):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst0 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdb1',
+ 'system_metadata': sys_meta})
+ inst1 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdc1',
+ 'system_metadata': sys_meta})
+ inst2 = db.instance_create(self.context,
+ {'image_ref': '',
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/vda',
+ 'system_metadata': sys_meta})
+
+ instance0_uuid = inst0['uuid']
+ mappings0 = [
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb1',
+ 'snapshot_id': '1',
+ 'volume_id': '2'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb2',
+ 'volume_id': '3',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb3',
+ 'delete_on_termination': True,
+ 'snapshot_id': '4',
+ 'volume_id': '5'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb4',
+ 'delete_on_termination': False,
+ 'snapshot_id': '6',
+ 'volume_id': '7'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb5',
+ 'snapshot_id': '8',
+ 'volume_id': '9',
+ 'volume_size': 0},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb6',
+ 'snapshot_id': '10',
+ 'volume_id': '11',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb7',
+ 'no_device': True},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb8',
+ 'virtual_name': 'swap'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb9',
+ 'virtual_name': 'ephemeral3'}]
+ instance2_uuid = inst2['uuid']
+ mappings2 = [
+ {'instance_uuid': instance2_uuid,
+ 'device_name': 'vda',
+ 'snapshot_id': '1',
+ 'volume_id': '21'}]
+
+ volumes0 = self._block_device_mapping_create(instance0_uuid, mappings0)
+ volumes2 = self._block_device_mapping_create(instance2_uuid, mappings2)
+ return ((inst0, inst1, inst2), (volumes0, [], volumes2))
+
+ def _tearDownBlockDeviceMapping(self, instances, volumes):
+ for vols in volumes:
+ for vol in vols:
+ self.volume_api.delete(self.context, vol['id'])
+ for instance in instances:
+ for bdm in db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid']):
+ db.block_device_mapping_destroy(self.context, bdm['id'])
+ db.instance_destroy(self.context, instance['uuid'])
+
+ _expected_instance_bdm0 = {
+ 'instanceId': 'i-00000001',
+ 'rootDeviceName': '/dev/sdb1',
+ 'rootDeviceType': 'ebs'}
+
+ _expected_block_device_mapping0 = [
+ {'deviceName': '/dev/sdb1',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000002',
+ }},
+ {'deviceName': '/dev/sdb2',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000003',
+ }},
+ {'deviceName': '/dev/sdb3',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': True,
+ 'volumeId': 'vol-00000005',
+ }},
+ {'deviceName': '/dev/sdb4',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000007',
+ }},
+ {'deviceName': '/dev/sdb5',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000009',
+ }},
+ {'deviceName': '/dev/sdb6',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-0000000b', }}]
+ # NOTE(yamahata): swap/ephemeral device case isn't supported yet.
+
+ _expected_instance_bdm1 = {
+ 'instanceId': 'i-00000002',
+ 'rootDeviceName': '/dev/sdc1',
+ 'rootDeviceType': 'instance-store'}
+
+ _expected_instance_bdm2 = {
+ 'instanceId': 'i-00000003',
+ 'rootDeviceName': '/dev/vda',
+ 'rootDeviceType': 'ebs'}
+
+ def test_format_instance_bdm(self):
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[0]['uuid'],
+ '/dev/sdb1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm0['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[1]['uuid'],
+ '/dev/sdc1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _assertInstance(self, instance_id):
+ ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[ec2_instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ result = result['instancesSet'][0]
+ self.assertEqual(result['instanceId'], ec2_instance_id)
+ return result
+
+ def _assertEqualBlockDeviceMapping(self, expected, result):
+ self.assertEqual(len(expected), len(result))
+ for x in expected:
+ found = False
+ for y in result:
+ if x['deviceName'] == y['deviceName']:
+ self.assertThat(x, matchers.IsSubDictOf(y))
+ found = True
+ break
+ self.assertTrue(found)
+
+ def test_describe_instances_bdm(self):
+ """Make sure describe_instances works with root_device_name and
+ block device mappings
+ """
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = self._assertInstance(instances[0]['id'])
+ self.assertThat(
+ self._expected_instance_bdm0,
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = self._assertInstance(instances[1]['id'])
+ self.assertThat(
+ self._expected_instance_bdm1,
+ matchers.IsSubDictOf(result))
+
+ result = self._assertInstance(instances[2]['id'])
+ self.assertThat(
+ self._expected_instance_bdm2,
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _setUpImageSet(self, create_volumes_and_snapshots=False):
+ self.flags(max_local_block_devices=-1)
+ mappings1 = [
+ {'device': '/dev/sda1', 'virtual': 'root'},
+
+ {'device': 'sdb0', 'virtual': 'ephemeral0'},
+ {'device': 'sdb1', 'virtual': 'ephemeral1'},
+ {'device': 'sdb2', 'virtual': 'ephemeral2'},
+ {'device': 'sdb3', 'virtual': 'ephemeral3'},
+ {'device': 'sdb4', 'virtual': 'ephemeral4'},
+
+ {'device': 'sdc0', 'virtual': 'swap'},
+ {'device': 'sdc1', 'virtual': 'swap'},
+ {'device': 'sdc2', 'virtual': 'swap'},
+ {'device': 'sdc3', 'virtual': 'swap'},
+ {'device': 'sdc4', 'virtual': 'swap'}]
+ block_device_mapping1 = [
+ {'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
+ {'device_name': '/dev/sdb2', 'volume_id': 1234567},
+ {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
+ {'device_name': '/dev/sdc2', 'volume_id': 12345678},
+ {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
+ {'device_name': '/dev/sdc4', 'no_device': True}]
+ image1 = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available',
+ 'mappings': mappings1,
+ 'block_device_mapping': block_device_mapping1,
+ }
+ }
+
+ mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
+ block_device_mapping2 = [{'device_name': '/dev/sdb1',
+ 'snapshot_id': 1234567}]
+ image2 = {
+ 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'type': 'machine',
+ 'root_device_name': '/dev/sdb1',
+ 'mappings': mappings2,
+ 'block_device_mapping': block_device_mapping2}}
+
+ def fake_show(meh, context, image_id, **kwargs):
+ _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
+ for i in _images:
+ if str(i['id']) == str(image_id):
+ return i
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def fake_detail(meh, context):
+ return [copy.deepcopy(image1), copy.deepcopy(image2)]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ volumes = []
+ snapshots = []
+ if create_volumes_and_snapshots:
+ for bdm in block_device_mapping1:
+ if 'volume_id' in bdm:
+ vol = self._volume_create(bdm['volume_id'])
+ volumes.append(vol['id'])
+ if 'snapshot_id' in bdm:
+ kwargs = {'volume_id': 76543210,
+ 'volume_size': 1,
+ 'name': 'test-snap',
+ 'description': 'test snap desc',
+ 'snap_id': bdm['snapshot_id'],
+ 'status': 'available'}
+ snap = self.volume_api.create_snapshot_with_kwargs(
+ self.context, **kwargs)
+ snapshots.append(snap['id'])
+ return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
+ _expected_root_device_name1 = '/dev/sda1'
+ # NOTE(yamahata): noDevice doesn't make sense when returning a mapping;
+ # it only makes sense when the user is overriding an
+ # existing mapping.
+ _expected_bdms1 = [
+ {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
+ 'snap-00053977'}},
+ {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
+ 'vol-00053977'}},
+ {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
+
+ {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
+ 'snap-00bc614e'}},
+ {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
+ 'vol-00bc614e'}},
+ {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
+ ]
+
+ _expected_root_device_name2 = '/dev/sdb1'
+ _expected_bdms2 = [{'deviceName': '/dev/sdb1',
+ 'ebs': {'snapshotId': 'snap-00053977'}}]
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def _volume_create(self, volume_id=None):
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ def _assert_volume_detached(self, vol):
+ self.assertIsNone(vol['instance_uuid'])
+ self.assertIsNone(vol['mountpoint'])
+ self.assertEqual(vol['status'], "available")
+ self.assertEqual(vol['attach_status'], "detached")
+
+ def test_stop_start_with_volume(self):
+ # Make sure run instance with block device mapping works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+ # Force periodic tasks to run at a short interval to avoid a 60s wait.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc',
+ 'volume_id': vol2_uuid,
+ 'delete_on_termination': True},
+ ]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ if str(vol['id']) == str(vol1_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdb')
+ elif str(vol['id']) == str(vol2_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdc')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ self.assertIn(vol['mountpoint'], ['/dev/sdb', '/dev/sdc'])
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ # Terminate the instance and verify the volumes afterwards.
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol2_uuid)
+ self.assertFalse(vol['deleted'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self._restart_compute_service()
+
+ def test_stop_with_attached_volume(self):
+ # Make sure attach info is reflected in the block device mapping.
+
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+
+ # Force periodic tasks to run at a short interval to avoid a 60s wait.
+ self._restart_compute_service(periodic_interval_max=0.3)
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_detached(vol)
+
+ inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
+ self.cloud.compute_api.attach_volume(self.context,
+ inst_obj,
+ volume_id=vol2_uuid,
+ device='/dev/sdc')
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.compute_api.detach_volume(self.context,
+ inst_obj, vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+
+ self._assert_volume_detached(vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
+ def test_run_with_snapshot(self):
+ # Makes sure run/stop/start instance with snapshot works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
+
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-2',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'snapshot_id': snap1_uuid,
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'snapshot_id': snap2_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+
+ vol1_id = None
+ vol2_id = None
+ for vol in vols:
+ snapshot_uuid = vol['snapshot_id']
+ if snapshot_uuid == snap1_uuid:
+ vol1_id = vol['id']
+ mountpoint = '/dev/vdb'
+ elif snapshot_uuid == snap2_uuid:
+ vol2_id = vol['id']
+ mountpoint = '/dev/vdc'
+ else:
+ self.fail()
+
+ self._assert_volume_attached(vol, instance_uuid, mountpoint)
+
+ # Just make sure we found them
+ self.assertTrue(vol1_id)
+ self.assertTrue(vol2_id)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol1_id)
+ self._assert_volume_detached(vol)
+ self.assertFalse(vol['deleted'])
+
+ def test_create_image(self):
+ # Make sure that CreateImage works.
+ # Force periodic tasks to run at a short interval to avoid a 60s wait.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ self._restart_compute_service()
+
+ @staticmethod
+ def _fake_bdm_get(ctxt, id):
+ return [{'volume_id': 87654321,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'},
+ {'volume_id': None,
+ 'snapshot_id': 98765432,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdi'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': True,
+ 'virtual_name': None,
+ 'delete_on_termination': None,
+ 'device_name': None},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral0',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral1',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdd'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral2',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sd3'},
+ ]
diff --git a/nova/tests/unit/api/ec2/test_cloud.py b/nova/tests/unit/api/ec2/test_cloud.py
new file mode 100644
index 0000000000..113af8c96c
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cloud.py
@@ -0,0 +1,3255 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import copy
+import datetime
+import functools
+import os
+import string
+import tempfile
+
+import fixtures
+import iso8601
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.api.ec2 import inst_state
+from nova.api.metadata import password
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import s3
+from nova.network import api as network_api
+from nova.network import base_api as base_network_api
+from nova.network import model
+from nova.network import neutronv2
+from nova import objects
+from nova.objects import base as obj_base
+from nova.openstack.common import log as logging
+from nova.openstack.common import policy as common_policy
+from nova.openstack.common import uuidutils
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import (
+ test_neutron_security_groups as test_neutron)
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import utils
+from nova.virt import fake as fake_virt
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+LOG = logging.getLogger(__name__)
+
+HOST = "testhost"
+
+
+def get_fake_cache(get_floating):
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ if get_floating:
+ ip_info = [_ip('192.168.0.3',
+ floats=['1.2.3.4', '5.6.7.8']),
+ _ip('192.168.0.4')]
+ else:
+ ip_info = [_ip('192.168.0.3'),
+ _ip('192.168.0.4')]
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': ip_info}]}}]
+
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+
+ return model.NetworkInfo.hydrate(info)
+
+
+def get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+
+ if kwargs.get('want_objects', False):
+ info_cache = objects.InstanceInfoCache()
+ info_cache.network_info = get_fake_cache(get_floating)
+ info_cache.obj_reset_changes()
+ else:
+ info_cache = {'network_info': get_fake_cache(get_floating)}
+
+ if isinstance(instances, (list, obj_base.ObjectListBase)):
+ for instance in instances:
+ instance['info_cache'] = info_cache
+ else:
+ instances['info_cache'] = info_cache
+ return instances
+
+
+class CloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CloudTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ ec2utils.reset_cache()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+ self.useFixture(fixtures.FakeLogger('boto'))
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+
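+ # CastAsCall turns RPC casts into calls so the services started above do
+ # their work synchronously within the test.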
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def fake_get_target(obj, iqn):
+ return 1
+
+ def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
+ pass
+
+ def _stub_instance_get_with_fixed_ips(self,
+ func_name, get_floating=True):
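+ # Wrap compute_api.<func_name> so the instances it returns carry the fake
+ # network info cache built by get_instances_with_cached_ips().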
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_regions(self):
+ # Makes sure describe regions runs without raising an exception.
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ self.flags(region_list=["one=test_host1", "two=test_host2"])
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+
+ def test_describe_addresses(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.flags(network_api_class='nova.network.api.API')
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_addresses_in_neutron(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_specific_address(self):
+ # Makes sure describe specific address works.
+ addresses = ["10.10.10.10", "10.10.10.11"]
+ for address in addresses:
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ result = self.cloud.describe_addresses(self.context)
+ self.assertEqual(len(result['addressesSet']), 2)
+ result = self.cloud.describe_addresses(self.context,
+ public_ip=['10.10.10.10'])
+ self.assertEqual(len(result['addressesSet']), 1)
+ for address in addresses:
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_allocate_address(self):
+ address = "10.10.10.10"
+ allocate = self.cloud.allocate_address
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.assertEqual(allocate(self.context)['publicIp'], address)
+ db.floating_ip_destroy(self.context, address)
+ self.assertRaises(exception.NoMoreFloatingIps,
+ allocate,
+ self.context)
+
+ def test_release_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova',
+ 'project_id': self.project_id})
+ result = self.cloud.release_address(self.context, address)
+ self.assertEqual(result.get('return', None), 'true')
+
+ def test_associate_disassociate_address(self):
+ # Verifies associate runs cleanly without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ # TODO(jkoelker) Probably need to query for instance_type_id and
+ # make sure we get a valid one
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.context,
+ instance_id=inst['id'],
+ instance_uuid=inst['uuid'],
+ host=inst['host'],
+ vpn=None,
+ rxtx_factor=3,
+ project_id=project_id,
+ macs=None)
+
+ fixed_ips = nw_info.fixed_ips()
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
+
+ self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
+ lambda *args: {'fixed_ips': ['10.0.0.1'],
+ 'fixed_ip6s': [],
+ 'floating_ips': []})
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+
+ return
+
+ self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
+ inst['host'])
+ db.instance_destroy(self.context, inst['uuid'])
+ db.floating_ip_destroy(self.context, address)
+
+ def test_disassociate_auto_assigned_address(self):
+ """Verifies disassociating auto assigned floating IP
+ raises an exception
+ """
+ address = "10.10.10.10"
+
+ def fake_get(*args, **kwargs):
+ pass
+
+ def fake_disassociate_floating_ip(*args, **kwargs):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+ self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+ self.stubs.Set(network_api.API, 'disassociate_floating_ip',
+ fake_disassociate_floating_ip)
+
+ self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+
+ def test_disassociate_unassociated_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.assertRaises(exception.InvalidAssociation,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_security_groups(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_all_tenants(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': 'foobar',
+ 'name': 'test'})
+
+ def _check_name(result, i, expected):
+ self.assertEqual(result['securityGroupInfo'][i]['groupName'],
+ expected)
+
+ # include all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ _check_name(result, 0, 'default')
+ _check_name(result, 1, sec['name'])
+
+ # exclude all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ # default all tenants
+ result = self.cloud.describe_security_groups(self.context)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[sec['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ default = db.security_group_get_by_name(self.context,
+ self.context.project_id,
+ 'default')
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[default['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ 'default')
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+ def test_security_group_quota_limit(self):
+ self.flags(quota_security_groups=10)
+ for i in range(1, CONF.quota_security_groups):
+ name = 'test name %i' % i
+ descript = 'test description %i' % i
+ create = self.cloud.create_security_group
+ create(self.context, name, descript)
+
+ # The 11th group (the default group plus the 9 created above) should fail.
+ self.assertRaises(exception.SecurityGroupLimitExceeded,
+ create, self.context, 'foo', 'bar')
+
+ def test_delete_security_group_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, group_id=sec['id']))
+
+ def test_delete_security_group_with_bad_name(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, 'badname')
+
+ def test_delete_security_group_with_bad_group_id(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, group_id=999)
+
+ def test_delete_security_group_no_params(self):
+ delete = self.cloud.delete_security_group
+ self.assertRaises(exception.MissingParameter, delete, self.context)
+
+ def test_delete_security_group_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.delete_security_group, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.authorize_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges':
+ {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_fail_missing_source_group(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertRaises(exception.SecurityGroupNotFound, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_authorize_security_group_ingress_ip_permissions_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'},
+ '2': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_describe_security_group_ingress_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec1 = db.security_group_create(self.context, kwargs)
+ sec2 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec3 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [
+ {'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}}},
+ {'ip_protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 80,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}}}]}
+ self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ self.assertEqual(len(actual_rules), 4)
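+ # The group-only permission (no protocol or ports) expands into icmp, tcp
+ # and udp rules covering all ports; the explicit tcp:80 permission adds
+ # the fourth rule.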
+ expected_rules = [{'fromPort': -1,
+ 'groups': [{'groupName': 'somegroup1',
+ 'userId': 'someuser'}],
+ 'ipProtocol': 'icmp',
+ 'ipRanges': [],
+ 'toPort': -1},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'tcp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'udp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 80,
+ 'groups': [{'groupName': u'othergroup2',
+ 'userId': u'someuser'}],
+ 'ipProtocol': u'tcp',
+ 'ipRanges': [],
+ 'toPort': 80}]
+ for rule in expected_rules:
+ self.assertIn(rule, actual_rules)
+
+ db.security_group_destroy(self.context, sec3['id'])
+ db.security_group_destroy(self.context, sec2['id'])
+ db.security_group_destroy(self.context, sec1['id'])
+
+ def test_revoke_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.revoke_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_revoke_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_revoke_security_group_ingress_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+ def test_authorize_security_group_ingress_missing_protocol_params(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ 'test')
+
+ def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ **kwargs)
+
+ def test_authorize_security_group_ingress_already_exists(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ self.assertRaises(exception.SecurityGroupRuleExists, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_security_group_ingress_quota_limit(self):
+ self.flags(quota_security_group_rules=20)
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec_group = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ for i in range(100, 120):
+ kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec_group['id'], **kwargs)
+
+ kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
+ self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
+ self.context, group_id=sec_group['id'], **kwargs)
+
+ def _test_authorize_security_group_no_ports_with_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto,
+ 'groups': {'1': {'user_id': self.context.user_id,
+ 'group_name': u'test'}}}
+ self.assertTrue(authz(self.context, group_name=sec['name'],
+ **auth_kwargs))
+
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ expected_rules = [{'groups': [{'groupName': 'test',
+ 'userId': self.context.user_id}],
+ 'ipProtocol': proto,
+ 'ipRanges': []}]
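+ # With no ports supplied, icmp rules default to type/code -1/-1, while tcp
+ # and udp default to the full 1-65535 port range.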
+ if proto == 'icmp':
+ expected_rules[0]['fromPort'] = -1
+ expected_rules[0]['toPort'] = -1
+ else:
+ expected_rules[0]['fromPort'] = 1
+ expected_rules[0]['toPort'] = 65535
+ self.assertEqual(expected_rules, actual_rules)
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def _test_authorize_security_group_no_ports_no_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto}
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ group_name=sec['name'], **auth_kwargs)
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_authorize_security_group_no_ports_icmp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('icmp')
+ self._test_authorize_security_group_no_ports_no_source_group('icmp')
+
+ def test_authorize_security_group_no_ports_tcp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('tcp')
+ self._test_authorize_security_group_no_ports_no_source_group('tcp')
+
+ def test_authorize_security_group_no_ports_udp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('udp')
+ self._test_authorize_security_group_no_ports_no_source_group('udp')
+
+ def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertRaises(exception.MissingParameter, revoke,
+ self.context, **kwargs)
+
+ def test_delete_security_group_in_use_by_group(self):
+ self.cloud.create_security_group(self.context, 'testgrp1',
+ "test group 1")
+ self.cloud.create_security_group(self.context, 'testgrp2',
+ "test group 2")
+ kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
+ 'group_name': u'testgrp2'}},
+ }
+ self.cloud.authorize_security_group_ingress(self.context,
+ group_name='testgrp1', **kwargs)
+
+ group1 = db.security_group_get_by_name(self.context,
+ self.project_id, 'testgrp1')
+ get_rules = db.security_group_rule_get_by_security_group
+
+ self.assertTrue(get_rules(self.context, group1['id']))
+ self.cloud.delete_security_group(self.context, 'testgrp2')
+ self.assertFalse(get_rules(self.context, group1['id']))
+
+ def test_delete_security_group_in_use_by_instance(self):
+ # Ensure that a group can not be deleted if in use by an instance.
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ args = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active'}
+ inst = db.instance_create(self.context, args)
+
+ args = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'name': 'testgrp',
+ 'description': 'Test group'}
+ group = db.security_group_create(self.context, args)
+
+ db.instance_add_security_group(self.context, inst['uuid'], group['id'])
+
+ self.assertRaises(exception.InvalidGroup,
+ self.cloud.delete_security_group,
+ self.context, 'testgrp')
+
+ db.instance_destroy(self.context, inst['uuid'])
+
+ self.cloud.delete_security_group(self.context, 'testgrp')
+
+ def test_describe_availability_zones(self):
+ # Makes sure describe_availability_zones works and filters results.
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ # Aggregate based zones
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
+ result = self.cloud.describe_availability_zones(self.context)
+ self.assertEqual(len(result['availabilityZoneInfo']), 3)
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+ self.assertEqual(len(result['availabilityZoneInfo']), 18)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
+ def test_describe_availability_zones_verbose(self):
+ # Makes sure describe_availability_zones works and filters results.
+ service1 = db.service_create(self.context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ service2 = db.service_create(self.context, {'host': 'host2_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'second_zone'})
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ result = self.cloud.describe_availability_zones(admin_ctxt,
+ zone_name='verbose')
+
+ self.assertEqual(len(result['availabilityZoneInfo']), 17)
+ db.service_destroy(self.context, service1['id'])
+ db.service_destroy(self.context, service2['id'])
+
+ def assertEqualSorted(self, x, y):
+ self.assertEqual(sorted(x), sorted(y))
+
+ def test_describe_instances(self):
+ # Makes sure describe_instances works and filters results.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+
+ sys_meta['EC2_client_token'] = "client-token-1"
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+
+ sys_meta['EC2_client_token'] = "client-token-2"
+ inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host2',
+ 'hostname': 'server-4321',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+ agg = db.aggregate_create(self.context,
+ {'name': 'agg1'}, {'availability_zone': 'zone1'})
+ db.aggregate_host_add(self.context, agg['id'], 'host1')
+
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'topic': "compute"})
+ agg2 = db.aggregate_create(self.context,
+ {'name': 'agg2'}, {'availability_zone': 'zone2'})
+ db.aggregate_host_add(self.context, agg2['id'], 'host2')
+
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 2)
+
+ # Now try filtering.
+ instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], instance_id)
+ self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
+ self.assertEqual(instance['ipAddress'], '1.2.3.4')
+ self.assertEqual(instance['dnsName'], '1.2.3.4')
+ self.assertEqual(instance['tagSet'], [])
+ self.assertEqual(instance['privateDnsName'], 'server-4321')
+ self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
+ self.assertEqual(instance['dnsNameV6'],
+ 'fe80:b33f::a8bb:ccff:fedd:eeff')
+ self.assertEqual(instance['clientToken'], 'client-token-2')
+
+ # A filter with even one invalid id should cause an exception to be
+ # raised
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id, '435679'])
+
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
+
+ def test_describe_instances_all_invalid(self):
+ # Makes sure describe_instances raises InstanceNotFound when the
+ # requested ids are all invalid.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ instance_id = ec2utils.id_to_ec2_inst_id('435679')
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.describe_instances, self.context,
+ instance_id=[instance_id])
+
+ def test_describe_instances_with_filters(self):
+ # Makes sure describe_instances works and filters results.
+ filters = {'filter': [{'name': 'test',
+ 'value': ['a', 'b']},
+ {'name': 'another_test',
+ 'value': 'a string'}]}
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': []})
+
+ def test_describe_instances_with_filters_tags(self):
+ # Makes sure describe_instances works and filters tag results.
+
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ utc = iso8601.iso8601.Utc()
+
+ # Create some test images
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc),
+ 'system_metadata': sys_meta
+ }
+
+ inst2_kwargs = {
+ 'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host2',
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1112',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
+ tzinfo=utc),
+ 'system_metadata': sys_meta
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+ ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+
+ # Create some tags
+ # We get one overlapping pair, one overlapping key, and a
+ # disparate pair
+ # inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
+ # inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
+
+ md = {'key': 'foo', 'value': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
+ tag=[md])
+
+ md2 = {'key': 'baz', 'value': 'wibble'}
+ md3 = {'key': 'bax', 'value': 'wobble'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1],
+ tag=[md2, md3])
+
+ md4 = {'key': 'baz', 'value': 'quux'}
+ md5 = {'key': 'zog', 'value': 'bobble'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id2],
+ tag=[md4, md5])
+ # We should be able to search by:
+
+ inst1_ret = {
+ 'groupSet': None,
+ 'instancesSet': [{'amiLaunchIndex': None,
+ 'dnsName': '1.2.3.4',
+ 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
+ 'imageId': 'ami-00000001',
+ 'instanceId': 'i-00000001',
+ 'instanceState': {'code': 16,
+ 'name': 'running'},
+ 'instanceType': u'm1.medium',
+ 'ipAddress': '1.2.3.4',
+ 'keyName': 'None (None, host1)',
+ 'launchTime':
+ datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc),
+ 'placement': {
+ 'availabilityZone': 'nova'},
+ 'privateDnsName': u'server-1111',
+ 'privateIpAddress': '192.168.0.3',
+ 'productCodesSet': None,
+ 'rootDeviceName': '/dev/sda1',
+ 'rootDeviceType': 'instance-store',
+ 'tagSet': [{'key': u'foo',
+ 'value': u'bar'},
+ {'key': u'baz',
+ 'value': u'wibble'},
+ {'key': u'bax',
+ 'value': u'wobble'}]}],
+ 'ownerId': None,
+ 'reservationId': u'a'}
+
+ inst2_ret = {
+ 'groupSet': None,
+ 'instancesSet': [{'amiLaunchIndex': None,
+ 'dnsName': '1.2.3.4',
+ 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
+ 'imageId': 'ami-00000001',
+ 'instanceId': 'i-00000002',
+ 'instanceState': {'code': 16,
+ 'name': 'running'},
+ 'instanceType': u'm1.medium',
+ 'ipAddress': '1.2.3.4',
+ 'keyName': u'None (None, host2)',
+ 'launchTime':
+ datetime.datetime(2012, 5, 1, 1, 1, 2,
+ tzinfo=utc),
+ 'placement': {
+ 'availabilityZone': 'nova'},
+ 'privateDnsName': u'server-1112',
+ 'privateIpAddress': '192.168.0.3',
+ 'productCodesSet': None,
+ 'rootDeviceName': '/dev/sda1',
+ 'rootDeviceType': 'instance-store',
+ 'tagSet': [{'key': u'foo',
+ 'value': u'bar'},
+ {'key': u'baz',
+ 'value': u'quux'},
+ {'key': u'zog',
+ 'value': u'bobble'}]}],
+ 'ownerId': None,
+ 'reservationId': u'b'}
+
+ # No filter
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Key search
+ # Both should have tags with key 'foo' and value 'bar'
+ filters = {'filter': [{'name': 'tag:foo',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Both should have tags with key 'foo'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['foo']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Value search
+ # Only inst2 should have tags with key 'baz' and value 'quux'
+ filters = {'filter': [{'name': 'tag:baz',
+ 'value': ['quux']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Only inst2 should have tags with value 'quux'
+ filters = {'filter': [{'name': 'tag-value',
+ 'value': ['quux']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Multiple values
+ # Both should have tags with key 'baz' and values in the set
+ # ['quux', 'wibble']
+ filters = {'filter': [{'name': 'tag:baz',
+ 'value': ['quux', 'wibble']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Both should have tags with key 'baz' or tags with value 'bar'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['baz']},
+ {'name': 'tag-value',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
+
+ # Confirm deletion of tags
+ # Check for format 'tag:'
+ self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
+ filters = {'filter': [{'name': 'tag:foo',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # Check for format 'tag-'
+ filters = {'filter': [{'name': 'tag-key',
+ 'value': ['foo']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+ filters = {'filter': [{'name': 'tag-value',
+ 'value': ['bar']}]}
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': [inst2_ret]})
+
+ # destroy the test instances
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+
+ def test_describe_instances_sorting(self):
+ # Makes sure describe_instances works and is sorted as expected.
+ self.flags(use_ipv6=True)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst_base = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta,
+ }
+
+ utc = iso8601.iso8601.Utc()
+
+ inst1_kwargs = {}
+ inst1_kwargs.update(inst_base)
+ inst1_kwargs['host'] = 'host1'
+ inst1_kwargs['hostname'] = 'server-1111'
+ inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
+ tzinfo=utc)
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+
+ inst2_kwargs = {}
+ inst2_kwargs.update(inst_base)
+ inst2_kwargs['host'] = 'host2'
+ inst2_kwargs['hostname'] = 'server-2222'
+ inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
+ tzinfo=utc)
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+
+ inst3_kwargs = {}
+ inst3_kwargs.update(inst_base)
+ inst3_kwargs['host'] = 'host3'
+ inst3_kwargs['hostname'] = 'server-3333'
+ inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
+ tzinfo=utc)
+ inst3 = db.instance_create(self.context, inst3_kwargs)
+
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+
+ comp2 = db.service_create(self.context, {'host': 'host2',
+ 'topic': "compute"})
+
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]['instancesSet']
+ self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
+ self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
+ self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
+
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.instance_destroy(self.context, inst2['uuid'])
+ db.instance_destroy(self.context, inst3['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+ db.service_destroy(self.context, comp2['id'])
+
+ def test_describe_instance_state(self):
+ # Makes sure describe_instances for instanceState works.
+
+ def test_instance_state(expected_code, expected_name,
+ power_state_, vm_state_, values=None):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ values = values or {}
+ values.update({'image_ref': image_uuid, 'instance_type_id': 1,
+ 'power_state': power_state_, 'vm_state': vm_state_,
+ 'system_metadata': sys_meta})
+ inst = db.instance_create(self.context, values)
+
+ instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[instance_id])
+ result = result['reservationSet'][0]
+ result = result['instancesSet'][0]['instanceState']
+
+ name = result['name']
+ code = result['code']
+ self.assertEqual(code, expected_code)
+ self.assertEqual(name, expected_name)
+
+ db.instance_destroy(self.context, inst['uuid'])
+
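+ # The EC2 state codes come from nova.api.ec2.inst_state (16 == running,
+ # 80 == stopped).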
+ test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
+ power_state.RUNNING, vm_states.ACTIVE)
+ test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
+ power_state.NOSTATE, vm_states.STOPPED,
+ {'shutdown_terminate': False})
+
+ def test_describe_instances_no_ipv6(self):
+ # Makes sure describe_instances w/ no ipv6 works.
+ self.flags(use_ipv6=False)
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ comp1 = db.service_create(self.context, {'host': 'host1',
+ 'topic': "compute"})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ instance = result['instancesSet'][0]
+ instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+ self.assertEqual(instance['instanceId'], instance_id)
+ self.assertEqual(instance['ipAddress'], '1.2.3.4')
+ self.assertEqual(instance['dnsName'], '1.2.3.4')
+ self.assertEqual(instance['privateDnsName'], 'server-1234')
+ self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
+ self.assertNotIn('dnsNameV6', instance)
+ db.instance_destroy(self.context, inst1['uuid'])
+ db.service_destroy(self.context, comp1['id'])
+
+ def test_describe_instances_deleted(self):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ args1 = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ inst1 = db.instance_create(self.context, args1)
+ args2 = {'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ inst2 = db.instance_create(self.context, args2)
+ db.instance_destroy(self.context, inst1['uuid'])
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(len(result['reservationSet']), 1)
+ result1 = result['reservationSet'][0]['instancesSet']
+ self.assertEqual(result1[0]['instanceId'],
+ ec2utils.id_to_ec2_inst_id(inst2['uuid']))
+
+ def test_describe_instances_with_image_deleted(self):
+ image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ args1 = {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ db.instance_create(self.context, args1)
+ args2 = {'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta}
+ db.instance_create(self.context, args2)
+ result = self.cloud.describe_instances(self.context)
+ self.assertEqual(len(result['reservationSet']), 2)
+
+ def test_describe_instances_dnsName_set(self):
+ # Verifies dnsName is not set when no floating IP is associated.
+ self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
+ self._stub_instance_get_with_fixed_ips('get', get_floating=False)
+
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'host': 'host1',
+ 'hostname': 'server-1234',
+ 'vm_state': 'active',
+ 'system_metadata': sys_meta})
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ instance = result['instancesSet'][0]
+ self.assertIsNone(instance['dnsName'])
+
+ def test_describe_instances_booting_from_a_volume(self):
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst = objects.Instance(self.context)
+ inst.reservation_id = 'a'
+ inst.image_ref = ''
+ inst.root_device_name = '/dev/sdh'
+ inst.instance_type_id = 1
+ inst.vm_state = vm_states.ACTIVE
+ inst.host = 'host1'
+ inst.system_metadata = sys_meta
+ inst.create()
+ result = self.cloud.describe_instances(self.context)
+ result = result['reservationSet'][0]
+ instance = result['instancesSet'][0]
+ self.assertIsNone(instance['imageId'])
+
+ def test_describe_images(self):
+ describe_images = self.cloud.describe_images
+
+ def fake_detail(meh, context, **kwargs):
+ return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}}]
+
+ def fake_show_none(meh, context, id):
+ raise exception.ImageNotFound(image_id='bad_image_id')
+
+ def fake_detail_none(self, context, **kwargs):
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ # list all
+ result1 = describe_images(self.context)
+ result1 = result1['imagesSet'][0]
+ self.assertEqual(result1['imageId'], 'ami-00000001')
+ # provide a valid image_id
+ result2 = describe_images(self.context, ['ami-00000001'])
+ self.assertEqual(1, len(result2['imagesSet']))
+ # provide more than 1 valid image_id
+ result3 = describe_images(self.context, ['ami-00000001',
+ 'ami-00000002'])
+ self.assertEqual(2, len(result3['imagesSet']))
+ # provide a non-existing image_id
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
+ self.assertRaises(exception.ImageNotFound, describe_images,
+ self.context, ['ami-fake'])
+
+ def assertDictListUnorderedMatch(self, L1, L2, key):
+ self.assertEqual(len(L1), len(L2))
+ for d1 in L1:
+ self.assertIn(key, d1)
+ for d2 in L2:
+ self.assertIn(key, d2)
+ if d1[key] == d2[key]:
+ self.assertThat(d1, matchers.DictMatches(d2))
+
+ def _setUpImageSet(self, create_volumes_and_snapshots=False):
+ self.flags(max_local_block_devices=-1)
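+ # A negative max_local_block_devices means no limit; presumably set so the
+ # large ephemeral/swap mappings below pass validation.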
+ mappings1 = [
+ {'device': '/dev/sda1', 'virtual': 'root'},
+
+ {'device': 'sdb0', 'virtual': 'ephemeral0'},
+ {'device': 'sdb1', 'virtual': 'ephemeral1'},
+ {'device': 'sdb2', 'virtual': 'ephemeral2'},
+ {'device': 'sdb3', 'virtual': 'ephemeral3'},
+ {'device': 'sdb4', 'virtual': 'ephemeral4'},
+
+ {'device': 'sdc0', 'virtual': 'swap'},
+ {'device': 'sdc1', 'virtual': 'swap'},
+ {'device': 'sdc2', 'virtual': 'swap'},
+ {'device': 'sdc3', 'virtual': 'swap'},
+ {'device': 'sdc4', 'virtual': 'swap'}]
+ block_device_mapping1 = [
+ {'device_name': '/dev/sdb1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
+ {'device_name': '/dev/sdb2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
+ {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
+ {'device_name': '/dev/sdc2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
+ {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
+ {'device_name': '/dev/sdc4', 'no_device': True}]
+ image1 = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available',
+ 'mappings': mappings1,
+ 'block_device_mapping': block_device_mapping1,
+ }
+ }
+
+ mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
+ block_device_mapping2 = [{'device_name': '/dev/sdb1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
+ 'volume_id': None}]
+ image2 = {
+ 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'type': 'machine',
+ 'root_device_name': '/dev/sdb1',
+ 'mappings': mappings2,
+ 'block_device_mapping': block_device_mapping2}}
+
+ def fake_show(meh, context, image_id, **kwargs):
+ _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
+ for i in _images:
+ if str(i['id']) == str(image_id):
+ return i
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def fake_detail(meh, context, **kwargs):
+ return [copy.deepcopy(image1), copy.deepcopy(image2)]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ volumes = []
+ snapshots = []
+ if create_volumes_and_snapshots:
+ for bdm in block_device_mapping1:
+ if 'volume_id' in bdm:
+ vol = self._volume_create(bdm['volume_id'])
+ volumes.append(vol['id'])
+ if 'snapshot_id' in bdm:
+ snap = self._snapshot_create(bdm['snapshot_id'])
+ snapshots.append(snap['id'])
+ return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
+ _expected_root_device_name1 = '/dev/sda1'
+ # NOTE(yamahata): noDevice doesn't make sense when returning a mapping;
+ # it makes sense only when the user is overriding an
+ # existing mapping.
+ _expected_bdms1 = [
+ {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
+ 'snap-00000001'}},
+ {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
+ 'vol-00000001'}},
+ {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
+ # {'deviceName': '/dev/sdb4', 'noDevice': True},
+
+ {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
+ 'snap-00000002'}},
+ {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
+ 'vol-00000002'}},
+ {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
+ # {'deviceName': '/dev/sdc4', 'noDevice': True}
+ ]
+
+ _expected_root_device_name2 = '/dev/sdb1'
+ _expected_bdms2 = [{'deviceName': '/dev/sdb1',
+ 'ebs': {'snapshotId': 'snap-00000003'}}]
+
+ # NOTE(yamahata):
+ # InstanceBlockDeviceMappingItemType
+ # rootDeviceType
+ # rootDeviceName
+ # blockDeviceMapping
+ # deviceName
+ # virtualName
+ # ebs
+ # snapshotId
+ # volumeSize
+ # deleteOnTermination
+ # noDevice
+ def test_describe_image_mapping(self):
+ # test for rootDeviceName and blockDeviceMapping.
+ describe_images = self.cloud.describe_images
+ self._setUpImageSet()
+
+ result = describe_images(self.context, ['ami-00000001'])
+ result = self._assertImageSet(result, 'instance-store',
+ self._expected_root_device_name1)
+
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms1, 'deviceName')
+
+ result = describe_images(self.context, ['ami-00000002'])
+ result = self._assertImageSet(result, 'ebs',
+ self._expected_root_device_name2)
+
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms2, 'deviceName')
+
+ def test_describe_image_attribute(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'is_public': True}
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'launchPermission')
+ self.assertEqual([{'group': 'all'}], result['launchPermission'])
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'kernel')
+ self.assertEqual('aki-00000001', result['kernel']['value'])
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'ramdisk')
+ self.assertEqual('ari-00000001', result['ramdisk']['value'])
+
+ def test_describe_image_attribute_root_device_name(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+ self._setUpImageSet()
+
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'rootDeviceName')
+ self.assertEqual(result['rootDeviceName'],
+ self._expected_root_device_name1)
+ result = describe_image_attribute(self.context, 'ami-00000002',
+ 'rootDeviceName')
+ self.assertEqual(result['rootDeviceName'],
+ self._expected_root_device_name2)
+
+ def test_describe_image_attribute_block_device_mapping(self):
+ describe_image_attribute = self.cloud.describe_image_attribute
+ self._setUpImageSet()
+
+ result = describe_image_attribute(self.context, 'ami-00000001',
+ 'blockDeviceMapping')
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms1, 'deviceName')
+ result = describe_image_attribute(self.context, 'ami-00000002',
+ 'blockDeviceMapping')
+ self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
+ self._expected_bdms2, 'deviceName')
+
+ def test_modify_image_attribute(self):
+ modify_image_attribute = self.cloud.modify_image_attribute
+
+ fake_metadata = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'is_public': False}
+
+ def fake_show(meh, context, id, **kwargs):
+ return copy.deepcopy(fake_metadata)
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ def fake_update(meh, context, image_id, metadata, data=None):
+ self.assertEqual(metadata['properties']['kernel_id'],
+ fake_metadata['properties']['kernel_id'])
+ self.assertEqual(metadata['properties']['ramdisk_id'],
+ fake_metadata['properties']['ramdisk_id'])
+ self.assertTrue(metadata['is_public'])
+ image = copy.deepcopy(fake_metadata)
+ image.update(metadata)
+ return image
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ self.stubs.Set(fake._FakeImageService, 'update', fake_update)
+ result = modify_image_attribute(self.context, 'ami-00000001',
+ 'launchPermission', 'add',
+ user_group=['all'])
+ self.assertTrue(result['is_public'])
+
+ def test_register_image(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(*args, **kwargs):
+ # NOTE(vish): We are mocking s3 so make sure we have converted
+ # to ids instead of uuids.
+ return {'id': 1,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'
+ },
+ 'is_public': False
+ }
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ image_location = 'fake_bucket/fake.img.manifest.xml'
+ result = register_image(self.context, image_location)
+ self.assertEqual(result['imageId'], 'ami-00000001')
+
+ def test_register_image_empty(self):
+ register_image = self.cloud.register_image
+ self.assertRaises(exception.MissingParameter, register_image,
+ self.context, image_location=None)
+
+ def test_register_image_name(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(_self, context, metadata, data=None):
+ self.assertEqual(metadata['name'], self.expected_name)
+ metadata['id'] = 1
+ metadata['container_format'] = 'ami'
+ metadata['is_public'] = False
+ return metadata
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ self.expected_name = 'fake_bucket/fake.img.manifest.xml'
+ register_image(self.context,
+ image_location=self.expected_name,
+ name=None)
+ self.expected_name = 'an image name'
+ register_image(self.context,
+ image_location='some_location',
+ name=self.expected_name)
+
+ def test_format_image(self):
+ image = {
+ 'id': 1,
+ 'container_format': 'ami',
+ 'name': 'name',
+ 'owner': 'someone',
+ 'properties': {
+ 'image_location': 'location',
+ 'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'},
+ 'is_public': False}
+ expected = {'name': 'name',
+ 'imageOwnerId': 'someone',
+ 'isPublic': False,
+ 'imageId': 'ami-00000001',
+ 'imageState': None,
+ 'rootDeviceType': 'instance-store',
+ 'architecture': None,
+ 'imageLocation': 'location',
+ 'kernelId': 'aki-00000001',
+ 'ramdiskId': 'ari-00000001',
+ 'rootDeviceName': '/dev/sda1',
+ 'imageType': 'machine',
+ 'description': None}
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['properties']['image_location'] = None
+ expected['imageLocation'] = 'None (name)'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['name'] = None
+ image['properties']['image_location'] = 'location'
+ expected['imageLocation'] = 'location'
+ expected['name'] = 'location'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+
+ def test_deregister_image(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ # valid image
+ result = deregister_image(self.context, 'ami-00000001')
+ self.assertTrue(result)
+ # invalid image
+ self.stubs.UnsetAll()
+
+ def fake_detail_empty(self, context, **kwargs):
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
+ self.assertRaises(exception.ImageNotFound, deregister_image,
+ self.context, 'ami-bad001')
+
+ def test_deregister_image_wrong_container_type(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ self.assertRaises(exception.NotFound, deregister_image, self.context,
+ 'aki-00000001')
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(output['passwordData'], 'fakepass')
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_console_output(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ output = self.cloud.get_console_output(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(base64.b64decode(output['output']),
+ 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
+ # TODO(soren): We need this until we can stop polling in the rpc code
+ # for unit tests.
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_key_generation(self):
+ result, private_key = self._create_key('test')
+
+ expected = db.key_pair_get(self.context,
+ self.context.user_id,
+ 'test')['public_key']
+
+ (fd, fname) = tempfile.mkstemp()
+ os.write(fd, private_key)
+
+ public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
+
+ os.unlink(fname)
+
+ # assert key fields are equal
+ self.assertEqual(''.join(public_key.split("\n")[2:-2]),
+ expected.split(" ")[1].strip())
+
+ def test_describe_key_pairs(self):
+ self._create_key('test1')
+ self._create_key('test2')
+ result = self.cloud.describe_key_pairs(self.context)
+ keys = result["keySet"]
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
+
+ def test_describe_bad_key_pairs(self):
+ self.assertRaises(exception.KeypairNotFound,
+ self.cloud.describe_key_pairs, self.context,
+ key_name=['DoesNotExist'])
+
+ def test_import_key_pair(self):
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+ with open(pubkey_path + '/dummy.pub') as f:
+ dummypub = f.readline().rstrip()
+ with open(pubkey_path + '/dummy.fingerprint') as f:
+ dummyfprint = f.readline().rstrip()
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ result = self.cloud.import_key_pair(self.context,
+ key_name,
+ public_key_material)
+ self.assertEqual(result['keyName'], key_name)
+ self.assertEqual(result['keyFingerprint'], dummyfprint)
+ keydata = db.key_pair_get(self.context,
+ self.context.user_id,
+ key_name)
+ self.assertEqual(dummypub, keydata['public_key'])
+ self.assertEqual(dummyfprint, keydata['fingerprint'])
+
+ def test_import_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=0)
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+        with open(pubkey_path + '/dummy.pub') as f:
+            dummypub = f.readline().rstrip()
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.import_key_pair, self.context, key_name,
+ public_key_material)
+
+ def test_create_key_pair(self):
+ good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
+ bad_names = ('', 'a' * 256, '*', '/')
+
+ for key_name in good_names:
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+ for key_name in bad_names:
+ self.assertRaises(exception.InvalidKeypair,
+ self.cloud.create_key_pair,
+ self.context,
+ key_name)
+
+ def test_create_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=10)
+ for i in range(0, 10):
+ key_name = 'key_%i' % i
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+        # the 11th key pair should fail
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.create_key_pair,
+ self.context,
+ 'foo')
+
+ def test_delete_key_pair(self):
+ self._create_key('test')
+ self.cloud.delete_key_pair(self.context, 'test')
+
+ def test_run_instances(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['imageId'], 'ami-00000001')
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+ self.assertEqual(instance['instanceState']['name'], 'running')
+ self.assertEqual(instance['instanceType'], 'm1.small')
+
+ def test_run_instances_invalid_maxcount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_mincount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'min_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_count(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'min_count': 2}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_availability_zone(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'placement': {'availability_zone': 'fake'},
+ }
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ def fake_format(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
+
+ def fake_create(*args, **kwargs):
+ self.assertEqual(kwargs['availability_zone'], 'fake')
+ return ({'id': 'fake-instance'}, 'fake-res-id')
+
+ self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
+
+ # NOTE(vish) the assert for this call is in the fake_create method.
+ run_instances(self.context, **kwargs)
+
+ def test_empty_reservation_id_from_token(self):
+ client_token = 'client-token-1'
+
+ def fake_get_all_system_metadata(context, search_filts):
+ reference = [{'key': ['EC2_client_token']},
+ {'value': ['client-token-1']}]
+ self.assertEqual(search_filts, reference)
+ return []
+
+ self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
+ fake_get_all_system_metadata)
+ resv_id = self.cloud._resv_id_from_token(self.context, client_token)
+ self.assertIsNone(resv_id)
+
+ def test_run_instances_idempotent(self):
+        # Ensure subsequent run_instances calls with the same client token
+        # are idempotent, and that calls with a different client token
+        # are not.
+
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ kwargs['client_token'] = 'client-token-1'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+
+ kwargs['client_token'] = 'client-token-2'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000002')
+
+ kwargs['client_token'] = 'client-token-2'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000002')
+
+ kwargs['client_token'] = 'client-token-1'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+
+ kwargs['client_token'] = 'client-token-3'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000003')
+
+ # make sure terminated instances lose their client tokens
+ self.cloud.stop_instances(self.context,
+ instance_id=[instance['instanceId']])
+ self.cloud.terminate_instances(self.context,
+ instance_id=[instance['instanceId']])
+
+ kwargs['client_token'] = 'client-token-3'
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['instanceId'], 'i-00000004')
+
+ def test_run_instances_image_state_none(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_no_state(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}, 'container_format': 'ami'}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_state_invalid(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_decrypt(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine', 'image_state': 'decrypting'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_status_active(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_stat_active(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+
+ def fake_id_to_glance_id(context, id):
+ return 'cedef40a-ed67-4d10-800e-17455edce175'
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
+
+ result = run_instances(self.context, **kwargs)
+ self.assertEqual(len(result['instancesSet']), 1)
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def test_stop_start_instance(self):
+ # Makes sure stop/start instance works.
+        # Force periodic tasks to run quickly instead of waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ def test_start_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_start_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:start":
+ common_policy.parse_rule("project_id:non_fake"),
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:start", exc.format_message())
+ self._restart_compute_service()
+
+ def test_stop_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_stop_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:stop":
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.stop_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:stop", exc.format_message())
+ self._restart_compute_service()
+
+ def test_terminate_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_invalid_instance_id(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ self._run_instance(**kwargs)
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.terminate_instances,
+ self.context, ['i-2'])
+ self._restart_compute_service()
+
+ def test_terminate_instances_disable_terminate(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
+ ec2utils.ec2_id_to_id(instance_id))
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': True})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 16,
+ 'name': 'running'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': False})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_two_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ inst1 = self._run_instance(**kwargs)
+ inst2 = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [inst1])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}},
+ {'instanceId': 'i-00000002',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [inst1, inst2])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_reboot_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ result = self.cloud.reboot_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ def _volume_create(self, volume_id=None):
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _snapshot_create(self, snapshot_id=None):
+ kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
+ 'status': "available",
+ 'volume_size': 1}
+ if snapshot_id:
+ kwargs['snap_id'] = snapshot_id
+ return self.volume_api.create_snapshot_with_kwargs(self.context,
+ **kwargs)
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
+ def _do_test_create_image(self, no_reboot):
+ """Make sure that CreateImage works."""
+        # Force periodic tasks to run quickly instead of waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ def fake_show(meh, context, id, **kwargs):
+ bdm = [dict(snapshot_id=snapshots[0],
+ volume_size=1,
+ device_name='sda1',
+ delete_on_termination=False)]
+ props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
+ ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ root_device_name='/dev/sda1',
+ block_device_mapping=bdm)
+ return dict(id=id,
+ properties=props,
+ container_format='ami',
+ status='active',
+ is_public=True)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': volumes[0],
+ 'snapshot_id': snapshots[0],
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'sda1',
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'connection_info': '{"foo":"bar"}',
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ virt_driver = {}
+
+ def fake_power_on(self, context, instance, network_info,
+ block_device_info):
+ virt_driver['powered_on'] = True
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
+
+ def fake_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
+ virt_driver['powered_off'] = True
+
+ self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
+
+ result = self.cloud.create_image(self.context, ec2_instance_id,
+ no_reboot=no_reboot)
+ ec2_ids = [result['imageId']]
+ created_image = self.cloud.describe_images(self.context,
+ ec2_ids)['imagesSet'][0]
+
+ self.assertIn('blockDeviceMapping', created_image)
+ bdm = created_image['blockDeviceMapping'][0]
+ self.assertEqual(bdm.get('deviceName'), 'sda1')
+ self.assertIn('ebs', bdm)
+ self.assertEqual(bdm['ebs'].get('snapshotId'),
+ ec2utils.id_to_ec2_snap_id(snapshots[0]))
+ self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
+ self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
+ self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
+ self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
+ self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ self._restart_compute_service()
+
+ def test_create_image_no_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(True)
+
+ def test_create_image_with_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(False)
+
+ def test_create_image_instance_store(self):
+ """Ensure CreateImage fails as expected for an instance-store-backed
+ instance
+ """
+        # Force periodic tasks to run quickly instead of waiting up to 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': volumes[0],
+ 'snapshot_id': snapshots[0],
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ self.assertRaises(exception.InvalidParameterValue,
+ self.cloud.create_image,
+ self.context,
+ ec2_instance_id,
+ no_reboot=True)
+
+ @staticmethod
+ def _fake_bdm_get(ctxt, id, use_slave=False):
+ blockdms = [{'volume_id': 87654321,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'},
+ {'volume_id': None,
+ 'snapshot_id': 98765432,
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'no_device': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdi'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': True,
+ 'source_type': 'blank',
+ 'destination_type': None,
+ 'delete_on_termination': None,
+ 'device_name': None},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdd'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sd3'},
+ ]
+
+ extra = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 0,
+ 'device_type': None,
+ 'disk_bus': None,
+ 'instance_uuid': '',
+ 'image_id': None,
+ 'volume_size': None,
+ 'connection_info': None,
+ 'boot_index': None,
+ 'guest_format': None,
+ }
+
+ for bdm in blockdms:
+ bdm.update(extra)
+
+ return blockdms
+
+ def test_describe_instance_attribute(self):
+ # Make sure that describe_instance_attribute works.
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ self._fake_bdm_get)
+
+ def fake_get(ctxt, instance_id, want_objects=False):
+ self.assertTrue(want_objects)
+ inst_type = flavors.get_default_flavor()
+ inst_type['name'] = 'fake_type'
+ sys_meta = flavors.save_flavor_info({}, inst_type)
+ secgroups = objects.SecurityGroupList()
+ secgroups.objects.append(
+ objects.SecurityGroup(name='fake0'))
+ secgroups.objects.append(
+ objects.SecurityGroup(name='fake1'))
+ instance = objects.Instance(ctxt)
+ instance.id = 0
+ instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
+ instance.root_device_name = '/dev/sdh'
+ instance.security_groups = secgroups
+ instance.vm_state = vm_states.STOPPED
+ instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
+ instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ instance.user_data = 'fake-user data'
+ instance.shutdown_terminate = False
+ instance.disable_terminate = False
+ instance.system_metadata = sys_meta
+ return instance
+ self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+
+ def fake_ec2_instance_get_by_id(ctxt, int_id):
+ if int_id == 305419896:
+ fake_map = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 305419896,
+ 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
+ }
+ return fake_map
+ raise exception.InstanceNotFound(instance_id=int_id)
+ self.stubs.Set(db, 'ec2_instance_get_by_id',
+ fake_ec2_instance_get_by_id)
+
+ get_attribute = functools.partial(
+ self.cloud.describe_instance_attribute,
+ self.context, 'i-12345678')
+
+ bdm = get_attribute('blockDeviceMapping')
+ bdm['blockDeviceMapping'].sort()
+
+ expected_bdm = {'instance_id': 'i-12345678',
+ 'rootDeviceType': 'ebs',
+ 'blockDeviceMapping': [
+ {'deviceName': '/dev/sdh',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': True,
+ 'volumeId': 'vol-05397fb1',
+ 'attachTime': '13:56:24'}}]}
+ expected_bdm['blockDeviceMapping'].sort()
+ self.assertEqual(bdm, expected_bdm)
+ groupSet = get_attribute('groupSet')
+ groupSet['groupSet'].sort()
+ expected_groupSet = {'instance_id': 'i-12345678',
+ 'groupSet': [{'groupId': 'fake0'},
+ {'groupId': 'fake1'}]}
+ expected_groupSet['groupSet'].sort()
+ self.assertEqual(groupSet, expected_groupSet)
+ self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
+ {'instance_id': 'i-12345678',
+ 'instanceInitiatedShutdownBehavior': 'stop'})
+ self.assertEqual(get_attribute('disableApiTermination'),
+ {'instance_id': 'i-12345678',
+ 'disableApiTermination': False})
+ self.assertEqual(get_attribute('instanceType'),
+ {'instance_id': 'i-12345678',
+ 'instanceType': 'fake_type'})
+ self.assertEqual(get_attribute('kernel'),
+ {'instance_id': 'i-12345678',
+ 'kernel': 'aki-00000001'})
+ self.assertEqual(get_attribute('ramdisk'),
+ {'instance_id': 'i-12345678',
+ 'ramdisk': 'ari-00000002'})
+ self.assertEqual(get_attribute('rootDeviceName'),
+ {'instance_id': 'i-12345678',
+ 'rootDeviceName': '/dev/sdh'})
+ # NOTE(yamahata): this isn't supported
+ # get_attribute('sourceDestCheck')
+ self.assertEqual(get_attribute('userData'),
+ {'instance_id': 'i-12345678',
+ 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
+
+ def test_instance_initiated_shutdown_behavior(self):
+ def test_dia_iisb(expected_result, **kwargs):
+ """test describe_instance_attribute
+ attribute instance_initiated_shutdown_behavior
+ """
+ kwargs.update({'instance_type': CONF.default_flavor,
+ 'max_count': 1})
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.describe_instance_attribute(self.context,
+ instance_id, 'instanceInitiatedShutdownBehavior')
+ self.assertEqual(result['instanceInitiatedShutdownBehavior'],
+ expected_result)
+
+ expected = {'instancesSet': [
+ {'instanceId': instance_id,
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context,
+ [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ test_dia_iisb('stop', image_id='ami-1')
+
+ block_device_mapping = [{'device_name': '/dev/vdb',
+ 'virtual_name': 'ephemeral0'}]
+ test_dia_iisb('stop', image_id='ami-2',
+ block_device_mapping=block_device_mapping)
+
+ def fake_show(self, context, id_, **kwargs):
+ LOG.debug("id_ %s", id_)
+
+ prop = {}
+ if id_ == 'ami-3':
+ pass
+ elif id_ == 'ami-4':
+ prop = {'mappings': [{'device': 'sdb0',
+ 'virtual': 'ephemeral0'}]}
+ elif id_ == 'ami-5':
+ prop = {'block_device_mapping':
+ [{'device_name': '/dev/sdb0',
+ 'virtual_name': 'ephemeral0'}]}
+ elif id_ == 'ami-6':
+ prop = {'mappings': [{'device': 'sdb0',
+ 'virtual': 'ephemeral0'}],
+ 'block_device_mapping':
+ [{'device_name': '/dev/sdb0',
+ 'virtual_name': 'ephemeral0'}]}
+
+ prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}
+ prop_base.update(prop)
+
+ return {
+ 'id': id_,
+ 'name': 'fake_name',
+ 'properties': prop_base,
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ # NOTE(yamahata): create ami-3 ... ami-7
+        # ami-1 and ami-2 are already created by setUp().
+ for i in range(3, 8):
+ db.s3_image_create(self.context, 'ami-%d' % i)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ test_dia_iisb('stop', image_id='ami-3')
+ test_dia_iisb('stop', image_id='ami-4')
+ test_dia_iisb('stop', image_id='ami-5')
+ test_dia_iisb('stop', image_id='ami-6')
+ test_dia_iisb('terminate', image_id='ami-7',
+ instance_initiated_shutdown_behavior='terminate')
+
+ def test_create_delete_tags(self):
+
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ # Create a test image
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ # Create some tags
+ md = {'key': 'foo', 'value': 'bar'}
+ md_result = {'foo': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id],
+ tag=[md])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md_result)
+ self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+ # Delete them
+ self.cloud.delete_tags(self.context, resource_id=[ec2_id],
+ tag=[{'key': 'foo', 'value': 'bar'}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, {})
+ self.assertEqual(meta_changes, [{'foo': ['-']}])
+
+ def test_describe_tags(self):
+ # We need to stub network calls
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ # We need to stub out the MQ call - it won't succeed. We do want
+ # to check that the method is called, though
+ meta_changes = [None]
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ # Create some test images
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst1_kwargs = {
+ 'reservation_id': 'a',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1111',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+ }
+
+ inst2_kwargs = {
+ 'reservation_id': 'b',
+ 'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'vm_state': 'active',
+ 'launched_at': timeutils.utcnow(),
+ 'hostname': 'server-1112',
+ 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
+ }
+
+ inst1 = db.instance_create(self.context, inst1_kwargs)
+ ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+ inst2 = db.instance_create(self.context, inst2_kwargs)
+ ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+
+ # Create some tags
+        # Both instances share the 'foo' tag; each also gets its own
+        # key/value pair:
+        # inst1 : {'foo': 'bar', 'bax': 'wibble'}
+        # inst2 : {'foo': 'bar', 'baz': 'quux'}
+
+ md = {'key': 'foo', 'value': 'bar'}
+ md_result = {'foo': 'bar'}
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
+ tag=[md])
+
+ self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md_result)
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst2)
+ self.assertEqual(metadata, md_result)
+
+ md2 = {'key': 'baz', 'value': 'quux'}
+ md2_result = {'baz': 'quux'}
+ md2_result.update(md_result)
+ self.cloud.create_tags(self.context, resource_id=[ec2_id2],
+ tag=[md2])
+
+ self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst2)
+ self.assertEqual(metadata, md2_result)
+
+ md3 = {'key': 'bax', 'value': 'wibble'}
+ md3_result = {'bax': 'wibble'}
+ md3_result.update(md_result)
+ self.cloud.create_tags(self.context, resource_id=[ec2_id1],
+ tag=[md3])
+
+ self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
+
+ metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+ inst1)
+ self.assertEqual(metadata, md3_result)
+
+ inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
+ 'resource_type': 'instance', 'value': u'bar'}
+ inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
+ 'resource_type': 'instance', 'value': u'wibble'}
+ inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
+ 'resource_type': 'instance', 'value': u'bar'}
+ inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
+ 'resource_type': 'instance', 'value': u'quux'}
+
+ # We should be able to search by:
+ # No filter
+ tags = self.cloud.describe_tags(self.context)['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+ inst2_key_baz, inst1_key_bax])
+
+ # Resource ID
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'resource-id',
+ 'value': [ec2_id1]}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
+
+ # Resource Type
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'resource-type',
+ 'value': ['instance']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+ inst2_key_baz, inst1_key_bax])
+
+ # Key, either bare or with wildcards
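+        # ('?' matches a single character and '*' matches any run of
+        # characters, as the 'ba?' and 'b*' filters below demonstrate.)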
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['foo']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['ba?']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['b*']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+ # Value, either bare or with wildcards
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['bar']}])['tagSet']
+ self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['wi*']}])['tagSet']
+ self.assertEqual(tags, [inst1_key_bax])
+
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'value',
+ 'value': ['quu?']}])['tagSet']
+ self.assertEqual(tags, [inst2_key_baz])
+
+ # Multiple values
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz', 'bax']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
+
+ # Multiple filters (AND): no match
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']},
+ {'name': 'value',
+ 'value': ['wibble']}])['tagSet']
+ self.assertEqual(tags, [])
+
+ # Multiple filters (AND): match
+ tags = self.cloud.describe_tags(self.context,
+ filter=[{'name': 'key',
+ 'value': ['baz']},
+ {'name': 'value',
+ 'value': ['quux']}])['tagSet']
+ self.assertEqualSorted(tags, [inst2_key_baz])
+
+        # And we should fail on unsupported resource types
+ self.assertRaises(exception.InvalidParameterValue,
+ self.cloud.describe_tags,
+ self.context,
+ filter=[{'name': 'resource-type',
+ 'value': ['instance', 'volume']}])
+
+ def test_resource_type_from_id(self):
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'i-12345'),
+ 'instance')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'r-12345'),
+ 'reservation')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'vol-12345'),
+ 'volume')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'snap-12345'),
+ 'snapshot')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'ami-12345'),
+ 'image')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'ari-12345'),
+ 'image')
+ self.assertEqual(
+ ec2utils.resource_type_from_id(self.context, 'aki-12345'),
+ 'image')
+ self.assertIsNone(
+ ec2utils.resource_type_from_id(self.context, 'x-12345'))
+
+ @mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
+ side_effect=lambda
+ ec2_volume_id: uuidutils.generate_uuid())
+    def test_detach_volume_unattached_error(self, mock_ec2_vol_id_to_uuid):
+ # Validates that VolumeUnattached is raised if the volume doesn't
+ # have an instance_uuid value.
+ ec2_volume_id = 'vol-987654321'
+
+ with mock.patch.object(self.cloud.volume_api, 'get',
+ side_effect=lambda context, volume_id:
+ {'id': volume_id}) as mock_get:
+ self.assertRaises(exception.VolumeUnattached,
+ self.cloud.detach_volume,
+ self.context,
+ ec2_volume_id)
+ mock_get.assert_called_once_with(self.context, mock.ANY)
+ mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
+
+
+class CloudTestCaseNeutronProxy(test.NoDBTestCase):
+ def setUp(self):
+ super(CloudTestCaseNeutronProxy, self).setUp()
+ cfg.CONF.set_override('security_group_api', 'neutron')
+ self.cloud = cloud.CloudController()
+ self.original_client = neutronv2.get_client
+ neutronv2.get_client = test_neutron.get_client
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ test_neutron.get_client()._reset()
+ super(CloudTestCaseNeutronProxy, self).tearDown()
+
+ def test_describe_security_groups(self):
+ # Makes sure describe_security_groups works and filters results.
+ group_name = 'test'
+ description = 'test'
+ self.cloud.create_security_group(self.context, group_name,
+ description)
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[group_name])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(result['securityGroupInfo'][0]['groupName'],
+ group_name)
+ self.cloud.delete_security_group(self.context, group_name)
+
+ def test_describe_security_groups_by_id(self):
+ group_name = 'test'
+ description = 'test'
+ self.cloud.create_security_group(self.context, group_name,
+ description)
+ neutron = test_neutron.get_client()
+ # Get id from neutron since cloud.create_security_group
+ # does not expose it.
+ search_opts = {'name': group_name}
+ groups = neutron.list_security_groups(
+ **search_opts)['security_groups']
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[groups[0]['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ group_name)
+ self.cloud.delete_security_group(self.context, group_name)
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+
+class FormatMappingTestCase(test.TestCase):
+
+ def test_format_mapping(self):
+ properties = {'block_device_mapping':
+ [{'guest_format': None, 'boot_index': 0,
+ 'no_device': None, 'volume_id': None,
+ 'volume_size': None, 'disk_bus': 'virtio',
+ 'image_id': None, 'source_type': 'snapshot',
+ 'device_type': 'disk',
+ 'snapshot_id': '993b31ac-452e-4fed-b745-7718385f1811',
+ 'destination_type': 'volume',
+ 'delete_on_termination': None},
+ {'guest_format': None, 'boot_index': None,
+ 'no_device': None, 'volume_id': None,
+ 'volume_size': None, 'disk_bus': None,
+ 'image_id': None, 'source_type': 'snapshot',
+ 'device_type': None,
+ 'snapshot_id': 'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef',
+ 'destination_type': 'volume',
+ 'delete_on_termination': None}],
+ 'checksum': '50bdc35edb03a38d91b1b071afb20a3c',
+ 'min_ram': '0', 'disk_format': 'qcow2',
+ 'image_name': 'cirros-0.3.0-x86_64-disk', 'bdm_v2': 'True',
+ 'image_id': '4fce9db9-d89e-4eea-8d20-e2bae15292c1',
+ 'root_device_name': '/dev/vda', 'container_format': 'bare',
+ 'min_disk': '0', 'size': '9761280'}
+ result = {'description': None,
+ 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+ 'isPublic': False, 'imageId': 'ami-00000002',
+ 'imageState': 'available', 'architecture': None,
+ 'imageLocation': 'None (xb)',
+ 'rootDeviceType': 'instance-store',
+ 'rootDeviceName': '/dev/vda',
+ 'imageType': 'machine', 'name': 'xb'}
+ cloud._format_mappings(properties, result)
+ expected = {'architecture': None,
+ 'blockDeviceMapping':
+ [{'ebs': {'snapshotId': 'snap-00000002'}}],
+ 'description': None,
+ 'imageId': 'ami-00000002',
+ 'imageLocation': 'None (xb)',
+ 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+ 'imageState': 'available',
+ 'imageType': 'machine',
+ 'isPublic': False,
+ 'name': 'xb',
+ 'rootDeviceName': '/dev/vda',
+ 'rootDeviceType': 'instance-store'}
+ self.assertEqual(expected, result)
diff --git a/nova/tests/unit/api/ec2/test_ec2_validate.py b/nova/tests/unit/api/ec2/test_ec2_validate.py
new file mode 100644
index 0000000000..53ae8c110e
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_ec2_validate.py
@@ -0,0 +1,277 @@
+# Copyright 2012 Cloudscaling, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class EC2ValidateTestCase(test.TestCase):
+ def setUp(self):
+ super(EC2ValidateTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+        self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.image_service = fake.FakeImageService()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ self.EC2_MALFORMED_IDS = ['foobar', '', 123]
+        self.EC2_VALID_IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
+
+ self.ec2_id_exception_map = [(x,
+ exception.InvalidInstanceIDMalformed)
+ for x in self.EC2_MALFORMED_IDS]
+        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
+                                          for x in self.EC2_VALID_IDS])
+ self.volume_id_exception_map = [(x,
+ exception.InvalidVolumeIDMalformed)
+ for x in self.EC2_MALFORMED_IDS]
+        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
+                                             for x in self.EC2_VALID_IDS])
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(self, context, **kwargs):
+ image = fake_show(self, context, None)
+ image['name'] = kwargs.get('name')
+ return [image]
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ super(EC2ValidateTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ # EC2_API tests (InvalidInstanceID.Malformed)
+ def test_console_output(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.get_console_output,
+ context=self.context,
+ instance_id=[ec2_id])
+
+ def test_describe_instance_attribute(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.describe_instance_attribute,
+ context=self.context,
+ instance_id=ec2_id,
+ attribute='kernel')
+
+ def test_instance_lifecycle(self):
+ lifecycle = [self.cloud.terminate_instances,
+ self.cloud.reboot_instances,
+ self.cloud.stop_instances,
+ self.cloud.start_instances,
+ ]
+ for cmd in lifecycle:
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ cmd,
+ context=self.context,
+ instance_id=[ec2_id])
+
+ def test_create_image(self):
+ for ec2_id, e in self.ec2_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.create_image,
+ context=self.context,
+ instance_id=ec2_id)
+
+ def test_create_snapshot(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.create_snapshot,
+ context=self.context,
+ volume_id=ec2_id)
+
+ def test_describe_volumes(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.describe_volumes,
+ context=self.context,
+ volume_id=[ec2_id])
+
+ def test_delete_volume(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.delete_volume,
+ context=self.context,
+ volume_id=ec2_id)
+
+ def test_detach_volume(self):
+ for ec2_id, e in self.volume_id_exception_map:
+ self.assertRaises(e,
+ self.cloud.detach_volume,
+ context=self.context,
+ volume_id=ec2_id)
+
+
+class EC2TimestampValidationTestCase(test.NoDBTestCase):
+ """Test case for EC2 request timestamp validation."""
+
+ def test_validate_ec2_timestamp_valid(self):
+ params = {'Timestamp': '2011-04-22T11:29:49Z'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_old_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_not_set(self):
+ params = {}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_ms_time_regex(self):
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
+ self.assertIsNotNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
+ self.assertIsNotNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
+ self.assertIsNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
+ self.assertIsNone(result)
+ result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
+ self.assertIsNone(result)
+
+ def test_validate_ec2_timestamp_aws_sdk_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_invalid_format(self):
+ params = {'Timestamp': '2011-04-22T11:29:49.000P'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_timestamp_advanced_time(self):
+
+        # EC2 request with a Timestamp set in the future
+ timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
+ params = {'Timestamp': timeutils.strtime(timestamp,
+ "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_timestamp_advanced_time_expired(self):
+ timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
+ params = {'Timestamp': timeutils.strtime(timestamp,
+ "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_timestamp_not_expired(self):
+ params = {'Timestamp': timeutils.isotime()}
+ expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
+ self.assertFalse(expired)
+
+ def test_validate_ec2_req_timestamp_expired(self):
+ params = {'Timestamp': '2011-04-22T12:00:00Z'}
+        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+        self.assertTrue(expired)
+
+ def test_validate_ec2_req_expired(self):
+ params = {'Expires': timeutils.isotime()}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_not_expired(self):
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
+ params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertFalse(expired)
+
+ def test_validate_Expires_timestamp_invalid_format(self):
+
+ # EC2 request with invalid Expires
+ params = {'Expires': '2011-04-22T11:29:49'}
+ expired = ec2utils.is_ec2_timestamp_expired(params)
+ self.assertTrue(expired)
+
+ def test_validate_ec2_req_timestamp_Expires(self):
+
+ # EC2 request with both Timestamp and Expires
+ params = {'Timestamp': '2011-04-22T11:29:49Z',
+ 'Expires': timeutils.isotime()}
+ self.assertRaises(exception.InvalidRequest,
+ ec2utils.is_ec2_timestamp_expired,
+ params)
diff --git a/nova/tests/unit/api/ec2/test_ec2utils.py b/nova/tests/unit/api/ec2/test_ec2utils.py
new file mode 100644
index 0000000000..9dceb7de12
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_ec2utils.py
@@ -0,0 +1,61 @@
+# Copyright 2014 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.ec2 import ec2utils
+from nova import context
+from nova import objects
+from nova import test
+
+
+class EC2UtilsTestCase(test.TestCase):
+ def setUp(self):
+ self.ctxt = context.get_admin_context()
+ ec2utils.reset_cache()
+ super(EC2UtilsTestCase, self).setUp()
+
+ def test_get_int_id_from_snapshot_uuid(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ self.assertEqual(smap.id, smap_id)
+
+ def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
+ self.assertEqual('fake-uuid', smap.uuid)
+
+ def test_get_snapshot_uuid_from_int_id(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
+ self.assertEqual(smap.uuid, smap_uuid)
+
+ def test_id_to_glance_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
+ self.assertEqual(uuid, s3imap.uuid)
+
+ def test_glance_id_to_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
+ self.assertEqual(s3imap_id, s3imap.id)
+
+ def test_glance_id_to_id_creates_mapping(self):
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
+ s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
+ self.assertEqual('fake-uuid', s3imap.uuid)
diff --git a/nova/tests/unit/api/ec2/test_error_response.py b/nova/tests/unit/api/ec2/test_error_response.py
new file mode 100644
index 0000000000..925d6723ed
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_error_response.py
@@ -0,0 +1,132 @@
+#
+# Copyright 2013 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Unit tests for EC2 error responses.
+"""
+
+from lxml import etree
+
+from nova.api import ec2
+from nova import context
+from nova import test
+from nova import wsgi
+
+
+class TestClientExceptionEC2(Exception):
+ ec2_code = 'ClientException.Test'
+ message = "Test Client Exception."
+ code = 400
+
+
+class TestServerExceptionEC2(Exception):
+ ec2_code = 'ServerException.Test'
+ message = "Test Server Exception."
+ code = 500
+
+
+class Ec2ErrorResponseTestCase(test.NoDBTestCase):
+ """Test EC2 error responses.
+
+ This deals mostly with api/ec2/__init__.py code, especially
+ the ec2_error_ex helper.
+ """
+ def setUp(self):
+ super(Ec2ErrorResponseTestCase, self).setUp()
+ self.context = context.RequestContext('test_user_id',
+ 'test_project_id')
+ self.req = wsgi.Request.blank('/test')
+ self.req.environ['nova.context'] = self.context
+
+ def _validate_ec2_error(self, response, http_status, ec2_code, msg=None,
+ unknown_msg=False):
+ self.assertEqual(response.status_code, http_status,
+ 'Expected HTTP status %s' % http_status)
+ root_e = etree.XML(response.body)
+ self.assertEqual(root_e.tag, 'Response',
+ "Top element must be Response.")
+ errors_e = root_e.find('Errors')
+ self.assertEqual(len(errors_e), 1,
+ "Expected exactly one Error element in Errors.")
+ error_e = errors_e[0]
+ self.assertEqual(error_e.tag, 'Error',
+ "Expected Error element.")
+ # Code
+ code_e = error_e.find('Code')
+ self.assertIsNotNone(code_e, "Code element must be present.")
+ self.assertEqual(code_e.text, ec2_code)
+ # Message
+ if msg or unknown_msg:
+ message_e = error_e.find('Message')
+ self.assertIsNotNone(code_e, "Message element must be present.")
+ if msg:
+ self.assertEqual(message_e.text, msg)
+ elif unknown_msg:
+ self.assertEqual(message_e.text, "Unknown error occurred.",
+ "Error message should be anonymous.")
+ # RequestID
+ requestid_e = root_e.find('RequestID')
+ self.assertIsNotNone(requestid_e,
+ 'RequestID element should be present.')
+ self.assertEqual(requestid_e.text, self.context.request_id)
+
+ def test_exception_ec2_4xx(self):
+ """Test response to EC2 exception with code = 400."""
+ msg = "Test client failure."
+ err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req)
+ self._validate_ec2_error(err, TestClientExceptionEC2.code,
+ TestClientExceptionEC2.ec2_code, msg)
+
+ def test_exception_ec2_5xx(self):
+ """Test response to EC2 exception with code = 500.
+
+ Expected errors are treated as client ones even with 5xx code.
+ """
+ msg = "Test client failure with 5xx error code."
+ err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req)
+ self._validate_ec2_error(err, 400, TestServerExceptionEC2.ec2_code,
+ msg)
+
+ def test_unexpected_exception_ec2_4xx(self):
+ """Test response to unexpected EC2 exception with code = 400."""
+ msg = "Test unexpected client failure."
+ err = ec2.ec2_error_ex(TestClientExceptionEC2(msg), self.req,
+ unexpected=True)
+ self._validate_ec2_error(err, TestClientExceptionEC2.code,
+ TestClientExceptionEC2.ec2_code, msg)
+
+ def test_unexpected_exception_ec2_5xx(self):
+ """Test response to unexpected EC2 exception with code = 500.
+
+ Server exception messages (with code >= 500 or without code) should
+ be filtered as they might contain sensitive information.
+ """
+ msg = "Test server failure."
+ err = ec2.ec2_error_ex(TestServerExceptionEC2(msg), self.req,
+ unexpected=True)
+ self._validate_ec2_error(err, TestServerExceptionEC2.code,
+ TestServerExceptionEC2.ec2_code,
+ unknown_msg=True)
+
+ def test_unexpected_exception_builtin(self):
+ """Test response to builtin unexpected exception.
+
+ Server exception messages (with code >= 500 or without code) should
+ be filtered as they might contain sensitive information.
+ """
+ msg = "Test server failure."
+ err = ec2.ec2_error_ex(RuntimeError(msg), self.req, unexpected=True)
+ self._validate_ec2_error(err, 500, 'RuntimeError', unknown_msg=True)
diff --git a/nova/tests/unit/api/ec2/test_faults.py b/nova/tests/unit/api/ec2/test_faults.py
new file mode 100644
index 0000000000..ae71be9bbf
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_faults.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import webob
+
+from nova.api.ec2 import faults
+from nova import test
+from nova import wsgi
+
+
+class TestFaults(test.NoDBTestCase):
+ """Tests covering ec2 Fault class."""
+
+ def test_fault_exception(self):
+        # Ensure the wrapped webob exception is preserved on faults.
+ fault = faults.Fault(webob.exc.HTTPBadRequest(
+ explanation='test'))
+ self.assertIsInstance(fault.wrapped_exc, webob.exc.HTTPBadRequest)
+
+ def test_fault_exception_status_int(self):
+ # Ensure the status_int is set correctly on faults.
+ fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
+ self.assertEqual(fault.wrapped_exc.status_int, 404)
+
+ def test_fault_call(self):
+ # Ensure proper EC2 response on faults.
+ message = 'test message'
+ ex = webob.exc.HTTPNotFound(explanation=message)
+ fault = faults.Fault(ex)
+ req = wsgi.Request.blank('/test')
+ req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
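+        # Expect the fault to render itself via ec2_error_response with the
+        # original message and HTTP status.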
+ self.mox.StubOutWithMock(faults, 'ec2_error_response')
+ faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
+ message=message, status=ex.status_int)
+ self.mox.ReplayAll()
+ fault(req)
diff --git a/nova/tests/unit/api/ec2/test_middleware.py b/nova/tests/unit/api/ec2/test_middleware.py
new file mode 100644
index 0000000000..3eb9c703da
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_middleware.py
@@ -0,0 +1,225 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet.green import httplib
+from lxml import etree
+import mox
+from oslo.config import cfg
+from oslo.utils import timeutils
+import webob
+import webob.dec
+import webob.exc
+
+from nova.api import ec2
+from nova import context
+from nova import exception
+from nova import test
+from nova import wsgi
+
+CONF = cfg.CONF
+
+
+@webob.dec.wsgify
+def conditional_forbid(req):
+ """Helper wsgi app returns 403 if param 'die' is 1."""
+ if 'die' in req.params and req.params['die'] == '1':
+ raise webob.exc.HTTPForbidden()
+ return 'OK'
+
+
+class LockoutTestCase(test.NoDBTestCase):
+ """Test case for the Lockout middleware."""
+ def setUp(self): # pylint: disable=C0103
+ super(LockoutTestCase, self).setUp()
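+        # Freeze time so lockout window/timeout checks are deterministic.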
+ timeutils.set_time_override()
+ self.lockout = ec2.Lockout(conditional_forbid)
+
+ def tearDown(self): # pylint: disable=C0103
+ timeutils.clear_time_override()
+ super(LockoutTestCase, self).tearDown()
+
+ def _send_bad_attempts(self, access_key, num_attempts=1):
+ """Fail x."""
+ for i in xrange(num_attempts):
+ req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
+ self.assertEqual(req.get_response(self.lockout).status_int, 403)
+
+ def _is_locked_out(self, access_key):
+ """Sends a test request to see if key is locked out."""
+ req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
+ return (req.get_response(self.lockout).status_int == 403)
+
+ def test_lockout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test'))
+
+ def test_timeout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test'))
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
+ self.assertFalse(self._is_locked_out('test'))
+
+ def test_multiple_keys(self):
+ self._send_bad_attempts('test1', CONF.lockout_attempts)
+ self.assertTrue(self._is_locked_out('test1'))
+ self.assertFalse(self._is_locked_out('test2'))
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
+ self.assertFalse(self._is_locked_out('test1'))
+ self.assertFalse(self._is_locked_out('test2'))
+
+ def test_window_timeout(self):
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
+ self.assertFalse(self._is_locked_out('test'))
+ timeutils.advance_time_seconds(CONF.lockout_window * 60)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
+ self.assertFalse(self._is_locked_out('test'))
+
+
+class ExecutorTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ExecutorTestCase, self).setUp()
+ self.executor = ec2.Executor()
+
+ def _execute(self, invoke):
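+        # Wrap the given callable in a fake ec2 request and run it through
+        # the Executor, returning the rendered WSGI response.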
+ class Fake(object):
+ pass
+ fake_ec2_request = Fake()
+ fake_ec2_request.invoke = invoke
+
+ fake_wsgi_request = Fake()
+
+ fake_wsgi_request.environ = {
+ 'nova.context': context.get_admin_context(),
+ 'ec2.request': fake_ec2_request,
+ }
+ return self.executor(fake_wsgi_request)
+
+ def _extract_message(self, result):
+ tree = etree.fromstring(result.body)
+ return tree.findall('./Errors')[0].find('Error/Message').text
+
+ def _extract_code(self, result):
+ tree = etree.fromstring(result.body)
+ return tree.findall('./Errors')[0].find('Error/Code').text
+
+ def test_instance_not_found(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=5)
+ result = self._execute(not_found)
+ self.assertIn('i-00000005', self._extract_message(result))
+ self.assertEqual('InvalidInstanceID.NotFound',
+ self._extract_code(result))
+
+ def test_instance_not_found_none(self):
+ def not_found(context):
+ raise exception.InstanceNotFound(instance_id=None)
+
+        # NOTE(mikal): no exception should be raised here, unlike the
+        # behaviour reported in bug/1080406
+ result = self._execute(not_found)
+ self.assertIn('None', self._extract_message(result))
+ self.assertEqual('InvalidInstanceID.NotFound',
+ self._extract_code(result))
+
+ def test_snapshot_not_found(self):
+ def not_found(context):
+ raise exception.SnapshotNotFound(snapshot_id=5)
+ result = self._execute(not_found)
+ self.assertIn('snap-00000005', self._extract_message(result))
+ self.assertEqual('InvalidSnapshot.NotFound',
+ self._extract_code(result))
+
+ def test_volume_not_found(self):
+ def not_found(context):
+ raise exception.VolumeNotFound(volume_id=5)
+ result = self._execute(not_found)
+ self.assertIn('vol-00000005', self._extract_message(result))
+ self.assertEqual('InvalidVolume.NotFound', self._extract_code(result))
+
+
+class FakeResponse(object):
+ reason = "Test Reason"
+
+ def __init__(self, status=400):
+ self.status = status
+
+ def read(self):
+ return '{}'
+
+
+class KeystoneAuthTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(KeystoneAuthTestCase, self).setUp()
+ self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
+
+ def _validate_ec2_error(self, response, http_status, ec2_code):
+ self.assertEqual(response.status_code, http_status,
+ 'Expected HTTP status %s' % http_status)
+ root_e = etree.XML(response.body)
+ self.assertEqual(root_e.tag, 'Response',
+ "Top element must be Response.")
+ errors_e = root_e.find('Errors')
+ error_e = errors_e[0]
+ code_e = error_e.find('Code')
+ self.assertIsNotNone(code_e, "Code element must be present.")
+ self.assertEqual(code_e.text, ec2_code)
+
+ def test_no_signature(self):
+ req = wsgi.Request.blank('/test')
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_no_key_id(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_communication_failure(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ req.GET['AWSAccessKeyId'] = 'test-key-id'
+
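+        # Stub the HTTP connection so the Keystone token request returns a
+        # 400 response.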
+ conn = httplib.HTTPConnection('/mock')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
+ conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
+ headers=mox.IgnoreArg())
+ resp = FakeResponse()
+ conn.getresponse().AndReturn(resp)
+ self.mox.ReplayAll()
+
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
+
+ def test_no_result_data(self):
+ req = wsgi.Request.blank('/test')
+ req.GET['Signature'] = 'test-signature'
+ req.GET['AWSAccessKeyId'] = 'test-key-id'
+
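+        # Stub Keystone to return HTTP 200 with an empty JSON body, which
+        # must still be treated as an auth failure.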
+ conn = httplib.HTTPConnection('/mock')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'close')
+ conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
+ headers=mox.IgnoreArg())
+ resp = FakeResponse(200)
+ conn.getresponse().AndReturn(resp)
+ conn.close()
+ self.mox.ReplayAll()
+
+ resp = self.kauth(req)
+ self._validate_ec2_error(resp, 400, 'AuthFailure')
diff --git a/nova/tests/unit/api/openstack/__init__.py b/nova/tests/unit/api/openstack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/__init__.py
diff --git a/nova/tests/unit/api/openstack/common.py b/nova/tests/unit/api/openstack/common.py
new file mode 100644
index 0000000000..972958a329
--- /dev/null
+++ b/nova/tests/unit/api/openstack/common.py
@@ -0,0 +1,55 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+import webob
+
+
+def webob_factory(url):
+ """Factory for removing duplicate webob code from tests."""
+
+ base_url = url
+
+ def web_request(url, method=None, body=None):
+ req = webob.Request.blank("%s%s" % (base_url, url))
+ if method:
+ req.content_type = "application/json"
+ req.method = method
+ if body:
+ req.body = jsonutils.dumps(body)
+ return req
+ return web_request
+
+
+def compare_links(actual, expected):
+ """Compare xml atom links."""
+
+ return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))
+
+
+def compare_media_types(actual, expected):
+ """Compare xml media types."""
+
+ return compare_tree_to_dict(actual, expected, ('base', 'type'))
+
+
+def compare_tree_to_dict(actual, expected, keys):
+ """Compare parts of lxml.etree objects to dicts."""
+
+ for elem, data in zip(actual, expected):
+ for key in keys:
+ if elem.get(key) != data.get(key):
+ return False
+ return True
diff --git a/nova/tests/unit/api/openstack/compute/__init__.py b/nova/tests/unit/api/openstack/compute/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/__init__.py b/nova/tests/unit/api/openstack/compute/contrib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py
new file mode 100644
index 0000000000..44bf495b29
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py
@@ -0,0 +1,734 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.contrib import admin_actions as \
+ admin_actions_v2
+from nova.api.openstack.compute.plugins.v3 import admin_actions as \
+ admin_actions_v21
+from nova.compute import vm_states
+import nova.context
+from nova import exception
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+class CommonMixin(object):
+ admin_actions = None
+ fake_url = None
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank(self.fake_url + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
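+        # Record a mox expectation that compute_api.get() returns a fake
+        # ACTIVE instance with the given (or a freshly generated) uuid.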
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_db_instance(
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
+ task_state=None, launched_at=timeutils.utcnow())
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance)
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndReturn(instance)
+ return instance
+
+ def _stub_instance_get_failure(self, exc_info, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndRaise(exc_info)
+ return uuid
+
+ def _test_non_existing_instance(self, action, body_map=None):
+ uuid = uuidutils.generate_uuid()
+ self._stub_instance_get_failure(
+ exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % uuid,
+ {action: body_map.get(action)})
+ self.assertEqual(404, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_action(self, action, body=None, method=None):
+ if method is None:
+ method = action
+
+ instance = self._stub_instance_get()
+ getattr(self.compute_api, method)(self.context, instance)
+
+ self.mox.ReplayAll()
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: None})
+ self.assertEqual(202, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_invalid_state(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
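+        # Record the compute API call and have it raise InstanceInvalidState.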
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceInvalidState(
+ attr='vm_state', instance_uuid=instance['uuid'],
+ state='foo', method=method))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ self.assertIn("Cannot \'%(action)s\' instance %(id)s"
+ % {'id': instance['uuid'], 'action': action}, res.body)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_locked_instance(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = (), {}
+ act = None
+
+ if compute_api_args_map:
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ act = body_map.get(action)
+
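+        # Record the compute API call and have it raise InstanceIsLocked.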
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+ self.mox.ReplayAll()
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: act})
+ self.assertEqual(409, res.status_int)
+ self.assertIn('Instance %s is locked' % instance['uuid'], res.body)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+
+class AdminActionsTestV21(CommonMixin, test.NoDBTestCase):
+ admin_actions = admin_actions_v21
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(AdminActionsTestV21, self).setUp()
+ self.controller = self.admin_actions.AdminActionsController()
+ self.compute_api = self.controller.compute_api
+ self.context = nova.context.RequestContext('fake', 'fake')
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(self.admin_actions, 'AdminActionsController',
+ _fake_controller)
+
+ self.app = self._get_app()
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers',
+ 'os-admin-actions'),
+ fake_auth_context=self.context)
+
+ def test_actions(self):
+ actions = ['resetNetwork', 'injectNetworkInfo']
+ method_translations = {'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_action(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_non_existed_instance(self):
+ actions = ['resetNetwork', 'injectNetworkInfo', 'os-resetState']
+ body_map = {'os-resetState': {'state': 'active'}}
+
+ for action in actions:
+ self._test_non_existing_instance(action,
+ body_map=body_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_locked_instance(self):
+ actions = ['resetNetwork', 'injectNetworkInfo']
+ method_translations = {'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_locked_instance(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+
+class AdminActionsTestV2(AdminActionsTestV21):
+ admin_actions = admin_actions_v2
+
+ def setUp(self):
+ super(AdminActionsTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Admin_actions'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',),
+ fake_auth_context=self.context)
+
+ def test_actions(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'resetNetwork', 'injectNetworkInfo', 'lock',
+ 'unlock']
+ method_translations = {'migrate': 'resize',
+ 'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_action(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_raise_conflict_on_invalid_state(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'os-migrateLive']
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_invalid_state(action, method=method, body_map=body_map,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_non_existed_instance(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume',
+ 'resetNetwork', 'injectNetworkInfo', 'lock',
+ 'unlock', 'os-resetState', 'migrate', 'os-migrateLive']
+ body_map = {'os-resetState': {'state': 'active'},
+ 'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ for action in actions:
+ self._test_non_existing_instance(action,
+ body_map=body_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_locked_instance(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'resetNetwork', 'injectNetworkInfo', 'os-migrateLive']
+ method_translations = {'migrate': 'resize',
+ 'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info',
+ 'os-migrateLive': 'live_migrate'}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_locked_instance(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_migrate_exception(self, exc_info, expected_result):
+ self.mox.StubOutWithMock(self.compute_api, 'resize')
+ instance = self._stub_instance_get()
+ self.compute_api.resize(self.context, instance).AndRaise(exc_info)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'migrate': None})
+ self.assertEqual(expected_result, res.status_int)
+
+ def _test_migrate_live_succeeded(self, param):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+ instance = self._stub_instance_get()
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname')
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'os-migrateLive': param})
+ self.assertEqual(202, res.status_int)
+
+ def test_migrate_live_enabled(self):
+ param = {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_enabled_with_string_param(self):
+ param = {'host': 'hostname',
+ 'block_migration': "False",
+ 'disk_over_commit': "False"}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_missing_dict_param(self):
+ body = {'os-migrateLive': {'dummy': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_block_migration(self):
+ body = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': "foo",
+ 'disk_over_commit': False}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_disk_over_commit(self):
+ body = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': "foo"}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def _test_migrate_live_failed_with_exception(self, fake_exc,
+ uuid=None):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+
+ instance = self._stub_instance_get(uuid=uuid)
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname').AndRaise(fake_exc)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+ self.assertIn(unicode(fake_exc), res.body)
+
+ def test_migrate_live_compute_service_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.ComputeServiceUnavailable(host='host'))
+
+ def test_migrate_live_invalid_hypervisor_type(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidHypervisorType())
+
+ def test_migrate_live_invalid_cpu_info(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidCPUInfo(reason=""))
+
+ def test_migrate_live_unable_to_migrate_to_self(self):
+ uuid = uuidutils.generate_uuid()
+ self._test_migrate_live_failed_with_exception(
+ exception.UnableToMigrateToSelf(instance_id=uuid,
+ host='host'),
+ uuid=uuid)
+
+ def test_migrate_live_destination_hypervisor_too_old(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.DestinationHypervisorTooOld())
+
+ def test_migrate_live_no_valid_host(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.NoValidHost(reason=''))
+
+ def test_migrate_live_invalid_local_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidLocalStorage(path='', reason=''))
+
+ def test_migrate_live_invalid_shared_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidSharedStorage(path='', reason=''))
+
+ def test_migrate_live_hypervisor_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.HypervisorUnavailable(host=""))
+
+ def test_migrate_live_instance_not_running(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InstanceNotRunning(instance_id=""))
+
+ def test_migrate_live_migration_pre_check_error(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.MigrationPreCheckError(reason=''))
+
+ def test_unlock_not_authorized(self):
+ self.mox.StubOutWithMock(self.compute_api, 'unlock')
+
+ instance = self._stub_instance_get()
+
+ self.compute_api.unlock(self.context, instance).AndRaise(
+ exception.PolicyNotAuthorized(action='unlock'))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'unlock': None})
+ self.assertEqual(403, res.status_int)
+
+
+class CreateBackupTestsV2(CommonMixin, test.NoDBTestCase):
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(CreateBackupTestsV2, self).setUp()
+ self.controller = admin_actions_v2.AdminActionsController()
+ self.compute_api = self.controller.compute_api
+ self.context = nova.context.RequestContext('fake', 'fake')
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(admin_actions_v2, 'AdminActionsController',
+ _fake_controller)
+
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Admin_actions'])
+
+ self.app = fakes.wsgi_app(init_only=('servers',),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+ self.mox.StubOutWithMock(common,
+ 'check_img_metadata_properties_quota')
+ self.mox.StubOutWithMock(self.compute_api,
+ 'backup')
+
+ def _make_url(self, uuid):
+ return '/servers/%s/action' % uuid
+
+ def test_create_backup_with_metadata(self):
+ metadata = {'123': 'asdf'}
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': metadata,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties=metadata)
+
+ common.check_img_metadata_properties_quota(self.context, metadata)
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties=metadata).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ # Name is required for backups.
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_rotation(self):
+ # Rotation is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation(self):
+ """Rotation must be greater than or equal to zero
+ for backup requests
+ """
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': -1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ # Backup Type (daily or weekly) is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_bad_entity(self):
+ body = {'createBackup': 'go'}
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_rotation_is_zero(self):
+ # The happy path for creating backups if rotation is zero.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 0,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 0,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertNotIn('Location', res.headers)
+
+ def test_create_backup_rotation_is_positive(self):
+ # The happy path for creating backups if rotation is positive.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_raises_conflict_on_invalid_state(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ args_map = {
+ 'createBackup': (
+ ('Backup 1', 'daily', 1), {'extra_properties': {}}
+ ),
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_invalid_state('createBackup', method='backup',
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_create_backup_with_non_existed_instance(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_non_existing_instance('createBackup',
+ body_map=body_map)
+
+ def test_create_backup_with_invalid_createBackup(self):
+ body = {
+ 'createBackupup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+
+class ResetStateTestsV21(test.NoDBTestCase):
+ admin_act = admin_actions_v21
+ bad_request = exception.ValidationError
+ fake_url = '/servers'
+
+ def setUp(self):
+ super(ResetStateTestsV21, self).setUp()
+ self.uuid = uuidutils.generate_uuid()
+ self.admin_api = self.admin_act.AdminActionsController()
+ self.compute_api = self.admin_api.compute_api
+
+ url = '%s/%s/action' % (self.fake_url, self.uuid)
+ self.request = self._get_request(url)
+ self.context = self.request.environ['nova.context']
+
+ def _get_request(self, url):
+ return fakes.HTTPRequest.blank(url)
+
+ def test_no_state(self):
+ self.assertRaises(self.bad_request,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": None})
+
+ def test_bad_state(self):
+ self.assertRaises(self.bad_request,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": {"state": "spam"}})
+
+ def test_no_instance(self):
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+        exc = exception.InstanceNotFound(instance_id='inst_uuid')
+ self.compute_api.get(self.context, self.uuid, expected_attrs=None,
+ want_objects=True).AndRaise(exc)
+ self.mox.ReplayAll()
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": {"state": "active"}})
+
+ def _setup_mock(self, expected):
+ instance = objects.Instance()
+ instance.uuid = self.uuid
+ instance.vm_state = 'fake'
+ instance.task_state = 'fake'
+ instance.obj_reset_changes()
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
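+        # Verify that instance.save() only changes the expected fields when
+        # the state reset is applied.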
+ def check_state(admin_state_reset=True):
+ self.assertEqual(set(expected.keys()),
+ instance.obj_what_changed())
+ for k, v in expected.items():
+ self.assertEqual(v, getattr(instance, k),
+ "Instance.%s doesn't match" % k)
+ instance.obj_reset_changes()
+
+ self.compute_api.get(self.context, instance.uuid, expected_attrs=None,
+ want_objects=True).AndReturn(instance)
+ instance.save(admin_state_reset=True).WithSideEffects(check_state)
+
+ def test_reset_active(self):
+ self._setup_mock(dict(vm_state=vm_states.ACTIVE,
+ task_state=None))
+ self.mox.ReplayAll()
+
+ body = {"os-resetState": {"state": "active"}}
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.admin_api,
+ admin_actions_v21.AdminActionsController):
+ status_int = self.admin_api._reset_state.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+ def test_reset_error(self):
+ self._setup_mock(dict(vm_state=vm_states.ERROR,
+ task_state=None))
+ self.mox.ReplayAll()
+ body = {"os-resetState": {"state": "error"}}
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.admin_api,
+ admin_actions_v21.AdminActionsController):
+ status_int = self.admin_api._reset_state.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+
+class ResetStateTestsV2(ResetStateTestsV21):
+ admin_act = admin_actions_v2
+ bad_request = webob.exc.HTTPBadRequest
+ fake_url = '/fake/servers'
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py b/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py
new file mode 100644
index 0000000000..4ddfc08dcc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py
@@ -0,0 +1,111 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import admin_password \
+ as admin_password_v21
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get(self, context, id, expected_attrs=None, want_objects=False):
+ return {'uuid': id}
+
+
+def fake_get_non_existent(self, context, id, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=id)
+
+
+def fake_set_admin_password(self, context, instance, password=None):
+ pass
+
+
+def fake_set_admin_password_failed(self, context, instance, password=None):
+ raise exception.InstancePasswordSetFailed(instance=instance, reason='')
+
+
+def fake_set_admin_password_not_implemented(self, context, instance,
+ password=None):
+ raise NotImplementedError()
+
+
+class AdminPasswordTestV21(test.NoDBTestCase):
+ plugin = admin_password_v21
+
+ def setUp(self):
+ super(AdminPasswordTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ self.plugin.ALIAS))
+
+ def _make_request(self, body):
+ req = webob.Request.blank('/v2/fake/servers/1/action')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ res = req.get_response(self.app)
+ return res
+
+ def test_change_password(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
+
+ def test_change_password_empty_string(self):
+ body = {'changePassword': {'adminPass': ''}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
+
+ def test_change_password_with_non_implement(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password_not_implemented)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 501)
+
+ def test_change_password_with_non_existed_instance(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'get', fake_get_non_existent)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 404)
+
+ def test_change_password_with_non_string_password(self):
+ body = {'changePassword': {'adminPass': 1234}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
+
+ def test_change_password_failed(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password_failed)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_change_password_without_admin_password(self):
+        body = {'changePassword': {}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
+
+ def test_change_password_none(self):
+ body = {'changePassword': None}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_agents.py b/nova/tests/unit/api/openstack/compute/contrib/test_agents.py
new file mode 100644
index 0000000000..b8c6f857b6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_agents.py
@@ -0,0 +1,352 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob.exc
+
+from nova.api.openstack.compute.contrib import agents as agents_v2
+from nova.api.openstack.compute.plugins.v3 import agents as agents_v21
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova import test
+
+fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'http://example.com/path/to/resource1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'id': 2},
+ {'hypervisor': 'xen', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'http://example.com/path/to/resource2',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f547',
+ 'id': 3},
+ {'hypervisor': 'xen', 'os': 'win',
+ 'architecture': 'power',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource3',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f548',
+ 'id': 4},
+ ]
+
+
+def fake_agent_build_get_all(context, hypervisor):
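+    # Return AgentBuild models built from fake_agents_list, optionally
+    # filtered by hypervisor.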
+ agent_build_all = []
+ for agent in fake_agents_list:
+ if hypervisor and hypervisor != agent['hypervisor']:
+ continue
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(agent)
+ agent_build_all.append(agent_build_ref)
+ return agent_build_all
+
+
+def fake_agent_build_update(context, agent_build_id, values):
+ pass
+
+
+def fake_agent_build_destroy(context, agent_update_id):
+ pass
+
+
+def fake_agent_build_create(context, values):
+ values['id'] = 1
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ return agent_build_ref
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithHypervisor(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {'hypervisor': 'kvm'}
+
+
+class AgentsTestV21(test.NoDBTestCase):
+ controller = agents_v21.AgentController()
+ validation_error = exception.ValidationError
+
+ def setUp(self):
+ super(AgentsTestV21, self).setUp()
+
+ self.stubs.Set(db, "agent_build_get_all",
+ fake_agent_build_get_all)
+ self.stubs.Set(db, "agent_build_update",
+ fake_agent_build_update)
+ self.stubs.Set(db, "agent_build_destroy",
+ fake_agent_build_destroy)
+ self.stubs.Set(db, "agent_build_create",
+ fake_agent_build_create)
+ self.context = context.get_admin_context()
+
+ def test_agents_create(self):
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ response = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1}}
+ res_dict = self.controller.create(req, body=body)
+ self.assertEqual(res_dict, response)
+
+ def _test_agents_create_key_error(self, key):
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['agent'].pop(key)
+ self.assertRaises(self.validation_error,
+ self.controller.create, req, body=body)
+
+ def test_agents_create_without_hypervisor(self):
+ self._test_agents_create_key_error('hypervisor')
+
+ def test_agents_create_without_os(self):
+ self._test_agents_create_key_error('os')
+
+ def test_agents_create_without_architecture(self):
+ self._test_agents_create_key_error('architecture')
+
+ def test_agents_create_without_version(self):
+ self._test_agents_create_key_error('version')
+
+ def test_agents_create_without_url(self):
+ self._test_agents_create_key_error('url')
+
+ def test_agents_create_without_md5hash(self):
+ self._test_agents_create_key_error('md5hash')
+
+ def test_agents_create_with_wrong_type(self):
+ req = FakeRequest()
+ body = {'agent': None}
+ self.assertRaises(self.validation_error,
+ self.controller.create, req, body=body)
+
+ def test_agents_create_with_empty_type(self):
+ req = FakeRequest()
+ body = {}
+ self.assertRaises(self.validation_error,
+ self.controller.create, req, body=body)
+
+ def test_agents_create_with_existed_agent(self):
+ def fake_agent_build_create_with_existed_agent(context, values):
+ raise exception.AgentBuildExists(**values)
+
+ self.stubs.Set(db, 'agent_build_create',
+ fake_agent_build_create_with_existed_agent)
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req,
+ body=body)
+
+ def _test_agents_create_with_invalid_length(self, key):
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['agent'][key] = 'x' * 256
+ self.assertRaises(self.validation_error,
+ self.controller.create, req, body=body)
+
+ def test_agents_create_with_invalid_length_hypervisor(self):
+ self._test_agents_create_with_invalid_length('hypervisor')
+
+ def test_agents_create_with_invalid_length_os(self):
+ self._test_agents_create_with_invalid_length('os')
+
+ def test_agents_create_with_invalid_length_architecture(self):
+ self._test_agents_create_with_invalid_length('architecture')
+
+ def test_agents_create_with_invalid_length_version(self):
+ self._test_agents_create_with_invalid_length('version')
+
+ def test_agents_create_with_invalid_length_url(self):
+ self._test_agents_create_with_invalid_length('url')
+
+ def test_agents_create_with_invalid_length_md5hash(self):
+ self._test_agents_create_with_invalid_length('md5hash')
+
+ def test_agents_delete(self):
+ req = FakeRequest()
+ self.controller.delete(req, 1)
+
+ def test_agents_delete_with_id_not_found(self):
+ with mock.patch.object(db, 'agent_build_destroy',
+ side_effect=exception.AgentBuildNotFound(id=1)):
+ req = FakeRequest()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, 1)
+
+ def test_agents_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ agents_list = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'http://example.com/path/to/resource1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'agent_id': 2},
+ {'hypervisor': 'xen', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'http://example.com/path/to/resource2',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f547',
+ 'agent_id': 3},
+ {'hypervisor': 'xen', 'os': 'win',
+ 'architecture': 'power',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource3',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f548',
+ 'agent_id': 4},
+ ]
+ self.assertEqual(res_dict, {'agents': agents_list})
+
+ def test_agents_list_with_hypervisor(self):
+ req = FakeRequestWithHypervisor()
+ res_dict = self.controller.index(req)
+ response = [{'hypervisor': 'kvm', 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1},
+ {'hypervisor': 'kvm', 'os': 'linux',
+ 'architecture': 'x86',
+ 'version': '16.0',
+ 'url': 'http://example.com/path/to/resource1',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f546',
+ 'agent_id': 2},
+ ]
+ self.assertEqual(res_dict, {'agents': response})
+
+ def test_agents_update(self):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ response = {'agent': {'agent_id': 1,
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ res_dict = self.controller.update(req, 1, body=body)
+ self.assertEqual(res_dict, response)
+
+ def _test_agents_update_key_error(self, key):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'xxx://xxxx/xxx/xxx',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['para'].pop(key)
+ self.assertRaises(self.validation_error,
+ self.controller.update, req, 1, body=body)
+
+ def test_agents_update_without_version(self):
+ self._test_agents_update_key_error('version')
+
+ def test_agents_update_without_url(self):
+ self._test_agents_update_key_error('url')
+
+ def test_agents_update_without_md5hash(self):
+ self._test_agents_update_key_error('md5hash')
+
+ def test_agents_update_with_wrong_type(self):
+ req = FakeRequest()
+ body = {'agent': None}
+ self.assertRaises(self.validation_error,
+ self.controller.update, req, 1, body=body)
+
+ def test_agents_update_with_empty(self):
+ req = FakeRequest()
+ body = {}
+ self.assertRaises(self.validation_error,
+ self.controller.update, req, 1, body=body)
+
+ def test_agents_update_value_error(self):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 1111,
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ self.assertRaises(self.validation_error,
+ self.controller.update, req, 1, body=body)
+
+ def _test_agents_update_with_invalid_length(self, key):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['para'][key] = 'x' * 256
+ self.assertRaises(self.validation_error,
+ self.controller.update, req, 1, body=body)
+
+ def test_agents_update_with_invalid_length_version(self):
+ self._test_agents_update_with_invalid_length('version')
+
+ def test_agents_update_with_invalid_length_url(self):
+ self._test_agents_update_with_invalid_length('url')
+
+ def test_agents_update_with_invalid_length_md5hash(self):
+ self._test_agents_update_with_invalid_length('md5hash')
+
+ def test_agents_update_with_id_not_found(self):
+ with mock.patch.object(db, 'agent_build_update',
+ side_effect=exception.AgentBuildNotFound(id=1)):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update, req, 1, body=body)
+
+
+class AgentsTestV2(AgentsTestV21):
+ controller = agents_v2.AgentController()
+ validation_error = webob.exc.HTTPBadRequest
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py
new file mode 100644
index 0000000000..9b52146fa1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py
@@ -0,0 +1,670 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the aggregates admin api."""
+
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import aggregates as aggregates_v2
+from nova.api.openstack.compute.plugins.v3 import aggregates as aggregates_v21
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+AGGREGATE_LIST = [
+ {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
+ {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
+ {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
+ {"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
+AGGREGATE = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": "nova1",
+ "metadata": {"foo": "bar"},
+ "hosts": ["host1, host2"]}
+
+FORMATTED_AGGREGATE = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": "nova1"}
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+
+class AggregateTestCaseV21(test.NoDBTestCase):
+ """Test Case for aggregates admin api."""
+
+ add_host = 'self.controller._add_host'
+ remove_host = 'self.controller._remove_host'
+ set_metadata = 'self.controller._set_metadata'
+ bad_request = exception.ValidationError
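+ # NOTE: add_host/remove_host/set_metadata hold eval()'d attribute paths
+ # rather than bound methods so that AggregateTestCaseV2 below can redirect
+ # them to the single v2 'action' handler while reusing these tests.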
+
+ def _set_up(self):
+ self.controller = aggregates_v21.AggregateController()
+ self.req = fakes.HTTPRequest.blank('/v3/os-aggregates',
+ use_admin_context=True)
+ self.user_req = fakes.HTTPRequest.blank('/v3/os-aggregates')
+ self.context = self.req.environ['nova.context']
+
+ def setUp(self):
+ super(AggregateTestCaseV21, self).setUp()
+ self._set_up()
+
+ def test_index(self):
+ def stub_list_aggregates(context):
+ if context is None:
+ raise Exception()
+ return AGGREGATE_LIST
+ self.stubs.Set(self.controller.api, 'get_aggregate_list',
+ stub_list_aggregates)
+
+ result = self.controller.index(self.req)
+
+ self.assertEqual(AGGREGATE_LIST, result["aggregates"])
+
+ def test_index_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index,
+ self.user_req)
+
+ def test_create(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("test", name, "name")
+ self.assertEqual("nova1", availability_zone, "availability_zone")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+ self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
+
+ def test_create_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, self.user_req,
+ body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_duplicate_aggregate_name(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ raise exception.AggregateNameExists(aggregate_name=name)
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ self.assertRaises(exc.HTTPConflict, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_incorrect_availability_zone(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ raise exception.InvalidAggregateAction(action='create_aggregate',
+ aggregate_id="'N/A'",
+ reason='invalid zone')
+
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova_bad"}})
+
+ def test_create_with_no_aggregate(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"foo":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_no_name(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"foo": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_no_availability_zone(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("test", name, "name")
+ self.assertIsNone(availability_zone, "availability_zone")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req,
+ body={"aggregate": {"name": "test"}})
+ self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
+
+ def test_create_with_null_name(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_name_too_long(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "x" * 256,
+ "availability_zone": "nova1"}})
+
+ def test_create_with_availability_zone_too_long(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "x" * 256}})
+
+ def test_create_with_null_availability_zone(self):
+ aggregate = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None,
+ "metadata": {},
+ "hosts": []}
+
+ formatted_aggregate = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None}
+
+ def stub_create_aggregate(context, name, az_name):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("aggregate1", name, "name")
+ self.assertIsNone(az_name, "availability_zone")
+ return aggregate
+ self.stubs.Set(self.controller.api, 'create_aggregate',
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req,
+ body={"aggregate":
+ {"name": "aggregate1",
+ "availability_zone": None}})
+ self.assertEqual(formatted_aggregate, result["aggregate"])
+
+ def test_create_with_empty_availability_zone(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": ""}})
+
+ def test_create_with_extra_invalid_arg(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"name": "test",
+ "availability_zone": "nova1",
+ "foo": 'bar'})
+
+ def test_show(self):
+ def stub_get_aggregate(context, id):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", id, "id")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, 'get_aggregate',
+ stub_get_aggregate)
+
+ aggregate = self.controller.show(self.req, "1")
+
+ self.assertEqual(AGGREGATE, aggregate["aggregate"])
+
+ def test_show_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show,
+ self.user_req, "1")
+
+ def test_show_with_invalid_id(self):
+ def stub_get_aggregate(context, id):
+ raise exception.AggregateNotFound(aggregate_id=2)
+
+ self.stubs.Set(self.controller.api, 'get_aggregate',
+ stub_get_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.show, self.req, "2")
+
+ def test_update(self):
+ body = {"aggregate": {"name": "new_name",
+ "availability_zone": "nova1"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual(body["aggregate"], values, "values")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_no_admin(self):
+ body = {"aggregate": {"availability_zone": "nova"}}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.update,
+ self.user_req, "1", body=body)
+
+ def test_update_with_only_name(self):
+ body = {"aggregate": {"name": "new_name"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_with_only_availability_zone(self):
+ body = {"aggregate": {"availability_zone": "nova1"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ result = self.controller.update(self.req, "1", body=body)
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_with_no_updates(self):
+ test_metadata = {"aggregate": {}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_no_update_key(self):
+ test_metadata = {"asdf": {}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_wrong_updates(self):
+ test_metadata = {"aggregate": {"status": "disable",
+ "foo": "bar"}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_null_name(self):
+ test_metadata = {"aggregate": {"name": ""}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_name_too_long(self):
+ test_metadata = {"aggregate": {"name": "x" * 256}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_availability_zone_too_long(self):
+ test_metadata = {"aggregate": {"availability_zone": "x" * 256}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_empty_availability_zone(self):
+ test_metadata = {"aggregate": {"availability_zone": ""}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_null_availability_zone(self):
+ body = {"aggregate": {"availability_zone": None}}
+ aggre = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertIsNone(values["availability_zone"], "availability_zone")
+ return aggre
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(aggre, result["aggregate"])
+
+ def test_update_with_bad_aggregate(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNotFound(aggregate_id=2)
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_duplicated_name(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNameExists(aggregate_name="test_name")
+
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPConflict, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_invalid_action(self):
+ body = {"append_host": {"host": "host1"}}
+ self.assertRaises(self.bad_request,
+ eval(self.add_host), self.req, "1", body=body)
+
+ def test_update_with_invalid_action(self):
+ with mock.patch.object(self.controller.api, "update_aggregate",
+ side_effect=exception.InvalidAggregateAction(
+ action='invalid', aggregate_id='agg1', reason="not empty")):
+ body = {"aggregate": {"availability_zone": "nova"}}
+ self.assertRaises(exc.HTTPBadRequest, self.controller.update,
+ self.req, "1", body=body)
+
+ def test_add_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual("host1", host, "host")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ aggregate = eval(self.add_host)(self.req, "1",
+ body={"add_host": {"host":
+ "host1"}})
+
+ self.assertEqual(aggregate["aggregate"], AGGREGATE)
+
+ def test_add_host_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.add_host),
+ self.user_req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_already_added_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.AggregateHostExists(aggregate_id=aggregate,
+ host=host)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPConflict, eval(self.add_host),
+ self.req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_bad_aggregate(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
+ self.req, "bogus_aggregate",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_bad_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.ComputeHostNotFound(host=host)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
+ self.req, "1",
+ body={"add_host": {"host": "bogus_host"}})
+
+ def test_add_host_with_missing_host(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"asdf": "asdf"}})
+
+ def test_add_host_with_invalid_format_host(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": "a" * 300}})
+
+ def test_add_host_with_multiple_hosts(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": ["host1", "host2"]}})
+
+ def test_add_host_raises_key_error(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise KeyError
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+ self.assertRaises(exc.HTTPInternalServerError,
+ eval(self.add_host), self.req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_invalid_request(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": "1"})
+
+ def test_add_host_with_non_string(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": 1}})
+
+ def test_remove_host(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual("host1", host, "host")
+ stub_remove_host_from_aggregate.called = True
+ return {}
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+ eval(self.remove_host)(self.req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ self.assertTrue(stub_remove_host_from_aggregate.called)
+
+ def test_remove_host_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.remove_host),
+ self.user_req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_bad_aggregate(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "bogus_aggregate",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_host_not_in_aggregate(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.AggregateHostNotFound(aggregate_id=aggregate,
+ host=host)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_bad_host(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.ComputeHostNotFound(host=host)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"host": "bogushost"}})
+
+ def test_remove_host_with_missing_host(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"asdf": "asdf"})
+
+ def test_remove_host_with_multiple_hosts(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"host":
+ ["host1", "host2"]}})
+
+ def test_remove_host_with_extra_param(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"asdf": "asdf",
+ "host": "asdf"}})
+
+ def test_remove_host_with_invalid_request(self):
+ self.assertRaises(self.bad_request,
+ eval(self.remove_host),
+ self.req, "1", body={"remove_host": "1"})
+
+ def test_remove_host_with_missing_host_empty(self):
+ self.assertRaises(self.bad_request,
+ eval(self.remove_host),
+ self.req, "1", body={"remove_host": {}})
+
+ def test_set_metadata(self):
+ body = {"set_metadata": {"metadata": {"foo": "bar"}}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertThat(body["set_metadata"]['metadata'],
+ matchers.DictMatches(values))
+ return AGGREGATE
+ self.stubs.Set(self.controller.api,
+ "update_aggregate_metadata",
+ stub_update_aggregate)
+
+ result = eval(self.set_metadata)(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_set_metadata_delete(self):
+ body = {"set_metadata": {"metadata": {"foo": None}}}
+
+ with mock.patch.object(self.controller.api,
+ 'update_aggregate_metadata') as mocked:
+ mocked.return_value = AGGREGATE
+ result = eval(self.set_metadata)(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+ mocked.assert_called_once_with(self.context, "1",
+ body["set_metadata"]["metadata"])
+
+ def test_set_metadata_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.set_metadata),
+ self.user_req, "1",
+ body={"set_metadata": {"metadata":
+ {"foo": "bar"}}})
+
+ def test_set_metadata_with_bad_aggregate(self):
+ body = {"set_metadata": {"metadata": {"foo": "bar"}}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api,
+ "update_aggregate_metadata",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPNotFound, eval(self.set_metadata),
+ self.req, "bad_aggregate", body=body)
+
+ def test_set_metadata_with_missing_metadata(self):
+ body = {"asdf": {"foo": "bar"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_extra_params(self):
+ body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_without_dict(self):
+ body = {"set_metadata": {'metadata': 1}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_empty_key(self):
+ body = {"set_metadata": {"metadata": {"": "value"}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_key_too_long(self):
+ body = {"set_metadata": {"metadata": {"x" * 256: "value"}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_value_too_long(self):
+ body = {"set_metadata": {"metadata": {"key": "x" * 256}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_string(self):
+ body = {"set_metadata": {"metadata": "test"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_delete_aggregate(self):
+ def stub_delete_aggregate(context, aggregate):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ stub_delete_aggregate.called = True
+ self.stubs.Set(self.controller.api, "delete_aggregate",
+ stub_delete_aggregate)
+
+ self.controller.delete(self.req, "1")
+ self.assertTrue(stub_delete_aggregate.called)
+
+ def test_delete_aggregate_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete,
+ self.user_req, "1")
+
+ def test_delete_aggregate_with_bad_aggregate(self):
+ def stub_delete_aggregate(context, aggregate):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api, "delete_aggregate",
+ stub_delete_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete,
+ self.req, "bogus_aggregate")
+
+ def test_delete_aggregate_with_host(self):
+ with mock.patch.object(self.controller.api, "delete_aggregate",
+ side_effect=exception.InvalidAggregateAction(
+ action="delete", aggregate_id="agg1",
+ reason="not empty")):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.delete,
+ self.req, "agg1")
+
+
+class AggregateTestCaseV2(AggregateTestCaseV21):
+ add_host = 'self.controller.action'
+ remove_host = 'self.controller.action'
+ set_metadata = 'self.controller.action'
+ bad_request = exc.HTTPBadRequest
+
+ def _set_up(self):
+ self.controller = aggregates_v2.AggregateController()
+ self.req = FakeRequest()
+ self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
+ self.context = self.req.environ['nova.context']
+
+ def test_add_host_raises_key_error(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise KeyError
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+ # NOTE(mtreinish): The check for a KeyError here ensures that if
+ # add_host_to_aggregate() raises a KeyError it propagates. At one
+ # point the api code masked the error as an HTTPBadRequest; this
+ # test ensures that does not happen again.
+ self.assertRaises(KeyError, eval(self.add_host), self.req, "1",
+ body={"add_host": {"host": "host1"}})
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py
new file mode 100644
index 0000000000..3b7e0b058a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py
@@ -0,0 +1,455 @@
+# Copyright 2012 SINA Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute.contrib import attach_interfaces \
+ as attach_interfaces_v2
+from nova.api.openstack.compute.plugins.v3 import attach_interfaces \
+ as attach_interfaces_v3
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_network_cache_model
+
+import webob
+from webob import exc
+
+
+CONF = cfg.CONF
+
+FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+
+FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
+FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
+FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
+
+FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
+FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
+FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
+FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
+
+port_data1 = {
+ "id": FAKE_PORT_ID1,
+ "network_id": FAKE_NET_ID1,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "aa:aa:aa:aa:aa:aa",
+ "fixed_ips": ["10.0.1.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data2 = {
+ "id": FAKE_PORT_ID2,
+ "network_id": FAKE_NET_ID2,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data3 = {
+ "id": FAKE_PORT_ID3,
+ "network_id": FAKE_NET_ID3,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": '',
+}
+
+fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
+ports = [port_data1, port_data2, port_data3]
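+# port_data3 has an empty device_id, so fake_list_ports returns only the
+# first two ports when queried with device_id=FAKE_UUID1.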
+
+
+def fake_list_ports(self, *args, **kwargs):
+ result = []
+ for port in ports:
+ if port['device_id'] == kwargs['device_id']:
+ result.append(port)
+ return {'ports': result}
+
+
+def fake_show_port(self, context, port_id, **kwargs):
+ for port in ports:
+ if port['id'] == port_id:
+ return {'port': port}
+ else:
+ raise exception.PortNotFound(port_id=port_id)
+
+
+def fake_attach_interface(self, context, instance, network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ # If no network_id is given when adding a port to an instance, use
+ # the first default network.
+ network_id = fake_networks[0]
+ if network_id == FAKE_BAD_NET_ID:
+ raise exception.NetworkNotFound(network_id=network_id)
+ if not port_id:
+ port_id = ports[fake_networks.index(network_id)]['id']
+ vif = fake_network_cache_model.new_vif()
+ vif['id'] = port_id
+ vif['network']['id'] = network_id
+ vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
+ return vif
+
+
+def fake_detach_interface(self, context, instance, port_id):
+ for port in ports:
+ if port['id'] == port_id:
+ return
+ raise exception.PortNotFound(port_id=port_id)
+
+
+def fake_get_instance(self, *args, **kwargs):
+ return objects.Instance(uuid=FAKE_UUID1)
+
+
+class InterfaceAttachTestsV21(test.NoDBTestCase):
+ url = '/v3/os-interfaces'
+ controller_cls = attach_interfaces_v3.InterfaceAttachmentController
+ validate_exc = exception.ValidationError
+
+ def setUp(self):
+ super(InterfaceAttachTestsV21, self).setUp()
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(compute_api.API, 'get', fake_get_instance)
+ self.context = context.get_admin_context()
+ self.expected_show = {'interfaceAttachment':
+ {'net_id': FAKE_NET_ID1,
+ 'port_id': FAKE_PORT_ID1,
+ 'mac_addr': port_data1['mac_address'],
+ 'port_state': port_data1['status'],
+ 'fixed_ips': port_data1['fixed_ips'],
+ }}
+ self.attachments = self.controller_cls()
+
+ @mock.patch.object(compute_api.API, 'get',
+ side_effect=exception.InstanceNotFound(instance_id=''))
+ def _test_instance_not_found(self, url, func, args, mock_get, kwargs=None,
+ method='GET'):
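+ # mock.patch.object passes the created mock as an extra trailing
+ # positional argument, which is why mock_get sits after 'args' in the
+ # signature while callers supply kwargs/method as keywords.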
+ req = webob.Request.blank(url)
+ req.method = method
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ if not kwargs:
+ kwargs = {}
+ self.assertRaises(exc.HTTPNotFound, func, req, *args, **kwargs)
+
+ def test_show_instance_not_found(self):
+ self._test_instance_not_found(self.url + 'fake',
+ self.attachments.show, ('fake', 'fake'))
+
+ def test_index_instance_not_found(self):
+ self._test_instance_not_found(self.url,
+ self.attachments.index, ('fake', ))
+
+ def test_detach_interface_instance_not_found(self):
+ self._test_instance_not_found(self.url + '/fake',
+ self.attachments.delete,
+ ('fake', 'fake'), method='DELETE')
+
+ def test_attach_interface_instance_not_found(self):
+ self._test_instance_not_found(
+ '/v2/fake/os-interfaces', self.attachments.create, ('fake', ),
+ kwargs={'body': {'interfaceAttachment': {}}}, method='POST')
+
+ def test_show(self):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
+ self.assertEqual(self.expected_show, result)
+
+ def test_show_invalid(self):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.show, req, FAKE_UUID2,
+ FAKE_PORT_ID1)
+
+ @mock.patch.object(network_api.API, 'show_port',
+ side_effect=exception.Forbidden)
+ def test_show_forbidden(self, show_port_mock):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPForbidden,
+ self.attachments.show, req, FAKE_UUID1,
+ FAKE_PORT_ID1)
+
+ def test_delete(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
+ # NOTE: on v2.1, the HTTP status code is exposed as the wsgi_code
+ # attribute of the API method rather than as status_int on a
+ # response object.
+ if isinstance(self.attachments,
+ attach_interfaces_v3.InterfaceAttachmentController):
+ status_int = self.attachments.delete.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+ def test_detach_interface_instance_locked(self):
+ def fake_detach_interface_from_locked_server(self, context,
+ instance, port_id):
+ raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
+
+ self.stubs.Set(compute_api.API,
+ 'detach_interface',
+ fake_detach_interface_from_locked_server)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ FAKE_PORT_ID1)
+
+ def test_delete_interface_not_found(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ 'invalid-port-id')
+
+ def test_attach_interface_instance_locked(self):
+ def fake_attach_interface_to_locked_server(self, context,
+ instance, network_id, port_id, requested_ip):
+ raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
+
+ self.stubs.Set(compute_api.API,
+ 'attach_interface',
+ fake_attach_interface_to_locked_server)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_without_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID1)
+
+ def test_attach_interface_with_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'net_id': FAKE_NET_ID2}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID2)
+
+ def _attach_interface_bad_request_case(self, body):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_with_port_and_network_id(self):
+ body = {
+ 'interfaceAttachment': {
+ 'port_id': FAKE_PORT_ID1,
+ 'net_id': FAKE_NET_ID2
+ }
+ }
+ self._attach_interface_bad_request_case(body)
+
+ def test_attach_interface_with_invalid_data(self):
+ body = {
+ 'interfaceAttachment': {
+ 'net_id': FAKE_BAD_NET_ID
+ }
+ }
+ self._attach_interface_bad_request_case(body)
+
+ def test_attach_interface_with_invalid_state(self):
+ def fake_attach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='attach_interface')
+
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'net_id': FAKE_NET_ID1}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_detach_interface_with_invalid_state(self):
+ def fake_detach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='detach_interface')
+
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ FAKE_NET_ID1)
+
+ def test_attach_interface_invalid_fixed_ip(self):
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ body = {
+ 'interfaceAttachment': {
+ 'net_id': FAKE_NET_ID1,
+ 'fixed_ips': [{'ip_address': 'invalid_ip'}]
+ }
+ }
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.validate_exc,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ @mock.patch.object(compute_api.API, 'get')
+ @mock.patch.object(compute_api.API, 'attach_interface')
+ def test_attach_interface_fixed_ip_already_in_use(self,
+ attach_mock,
+ get_mock):
+ fake_instance = objects.Instance(uuid=FAKE_UUID1)
+ get_mock.return_value = fake_instance
+ attach_mock.side_effect = exception.FixedIpAlreadyInUse(
+ address='10.0.2.2', instance_uuid=FAKE_UUID1)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ attach_mock.assert_called_once_with(self.context, fake_instance, None,
+ None, None)
+ get_mock.assert_called_once_with(self.context, FAKE_UUID1,
+ want_objects=True,
+ expected_attrs=None)
+
+ def _test_attach_interface_with_invalid_parameter(self, param):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interface_attachment': param})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ param = {'net_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ param = {'port_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ param = {'fixed_ips': 'non_array'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+
+class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
+ url = '/v2/fake/os-interfaces'
+ controller_cls = attach_interfaces_v2.InterfaceAttachmentController
+ validate_exc = exc.HTTPBadRequest
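+ # The non-uuid/non-array cases below rely on v2.1 request schema
+ # validation, which the v2 API does not perform, so they are overridden
+ # as no-ops.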
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py
new file mode 100644
index 0000000000..31b20d6861
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py
@@ -0,0 +1,512 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import availability_zone as az_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import availability_zone as az_v21
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+from nova import availability_zones
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import servicegroup
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_service
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_service_get_all(context, disabled=None):
+ def __fake_service(binary, availability_zone,
+ created_at, updated_at, host, disabled):
+ return dict(test_service.fake_service,
+ binary=binary,
+ availability_zone=availability_zone,
+ available_zones=availability_zone,
+ created_at=created_at,
+ updated_at=updated_at,
+ host=host,
+ disabled=disabled)
+
+ if disabled:
+ return [__fake_service("nova-compute", "zone-2",
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", True),
+ __fake_service("nova-scheduler", "internal",
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", True),
+ __fake_service("nova-network", "internal",
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
+ "fake_host-2", True)]
+ else:
+ return [__fake_service("nova-compute", "zone-1",
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", False),
+ __fake_service("nova-sched", "internal",
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", False),
+ __fake_service("nova-network", "internal",
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
+ "fake_host-2", False)]
+
+
+def fake_service_is_up(self, service):
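+ # nova-network is treated as down so the detail view reports it as
+ # unavailable ('XXX') in test_availability_zone_detail.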
+ return service['binary'] != u"nova-network"
+
+
+def fake_set_availability_zones(context, services):
+ return services
+
+
+def fake_get_availability_zones(context):
+ return ['nova'], []
+
+
+CONF = cfg.CONF
+
+
+class AvailabilityZoneApiTestV21(test.NoDBTestCase):
+ availability_zone = az_v21
+ url = '/v2/fake/os-availability-zone'
+
+ def setUp(self):
+ super(AvailabilityZoneApiTestV21, self).setUp()
+ availability_zones.reset_cache()
+ self.stubs.Set(db, 'service_get_all', fake_service_get_all)
+ self.stubs.Set(availability_zones, 'set_availability_zones',
+ fake_set_availability_zones)
+ self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
+
+ def _get_wsgi_instance(self):
+ return fakes.wsgi_app_v21(init_only=('os-availability-zone',
+ 'servers'))
+
+ def test_filtered_availability_zones(self):
+ az = self.availability_zone.AvailabilityZoneController()
+ zones = ['zone1', 'internal']
+ expected = [{'zoneName': 'zone1',
+ 'zoneState': {'available': True},
+ "hosts": None}]
+ result = az._get_filtered_availability_zones(zones, True)
+ self.assertEqual(result, expected)
+
+ expected = [{'zoneName': 'zone1',
+ 'zoneState': {'available': False},
+ "hosts": None}]
+ result = az._get_filtered_availability_zones(zones, False)
+ self.assertEqual(result, expected)
+
+ def test_availability_zone_index(self):
+ req = webob.Request.blank(self.url)
+ resp = req.get_response(self._get_wsgi_instance())
+ self.assertEqual(resp.status_int, 200)
+ resp_dict = jsonutils.loads(resp.body)
+
+ self.assertIn('availabilityZoneInfo', resp_dict)
+ zones = resp_dict['availabilityZoneInfo']
+ self.assertEqual(len(zones), 2)
+ self.assertEqual(zones[0]['zoneName'], u'zone-1')
+ self.assertTrue(zones[0]['zoneState']['available'])
+ self.assertIsNone(zones[0]['hosts'])
+ self.assertEqual(zones[1]['zoneName'], u'zone-2')
+ self.assertFalse(zones[1]['zoneState']['available'])
+ self.assertIsNone(zones[1]['hosts'])
+
+ def test_availability_zone_detail(self):
+ def _formatZone(zone_dict):
+ result = []
+
+ # Zone tree view item
+ result.append({'zoneName': zone_dict['zoneName'],
+ 'zoneState': u'available'
+ if zone_dict['zoneState']['available'] else
+ u'not available'})
+
+ if zone_dict['hosts'] is not None:
+ for (host, services) in zone_dict['hosts'].items():
+ # Host tree view item
+ result.append({'zoneName': u'|- %s' % host,
+ 'zoneState': u''})
+ for (svc, state) in services.items():
+ # Service tree view item
+ result.append({'zoneName': u'| |- %s' % svc,
+ 'zoneState': u'%s %s %s' % (
+ 'enabled' if state['active'] else
+ 'disabled',
+ ':-)' if state['available'] else
+ 'XXX',
+ jsonutils.to_primitive(
+ state['updated_at']))})
+ return result
+
+ def _assertZone(zone, name, status):
+ self.assertEqual(zone['zoneName'], name)
+ self.assertEqual(zone['zoneState'], status)
+
+ availabilityZone = self.availability_zone.AvailabilityZoneController()
+
+ req_url = self.url + '/detail'
+ req = webob.Request.blank(req_url)
+ req.method = 'GET'
+ req.environ['nova.context'] = context.get_admin_context()
+ resp_dict = availabilityZone.detail(req)
+
+ self.assertIn('availabilityZoneInfo', resp_dict)
+ zones = resp_dict['availabilityZoneInfo']
+ self.assertEqual(len(zones), 3)
+
+ ''' availabilityZoneInfo field content in response body:
+ [{'zoneName': 'zone-1',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-compute': {'active': True, 'available': True,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}},
+ {'zoneName': 'internal',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-sched': {'active': True, 'available': True,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}},
+ 'fake_host-2': {
+ 'nova-network': {'active': True, 'available': False,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}},
+ {'zoneName': 'zone-2',
+ 'zoneState': {'available': False},
+ 'hosts': None}]
+ '''
+
+ l0 = [u'zone-1', u'available']
+ l1 = [u'|- fake_host-1', u'']
+ l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000']
+ l3 = [u'internal', u'available']
+ l4 = [u'|- fake_host-1', u'']
+ l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000']
+ l6 = [u'|- fake_host-2', u'']
+ l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000']
+ l8 = [u'zone-2', u'not available']
+
+ z0 = _formatZone(zones[0])
+ z1 = _formatZone(zones[1])
+ z2 = _formatZone(zones[2])
+
+ self.assertEqual(len(z0), 3)
+ self.assertEqual(len(z1), 5)
+ self.assertEqual(len(z2), 1)
+
+ _assertZone(z0[0], l0[0], l0[1])
+ _assertZone(z0[1], l1[0], l1[1])
+ _assertZone(z0[2], l2[0], l2[1])
+ _assertZone(z1[0], l3[0], l3[1])
+ _assertZone(z1[1], l4[0], l4[1])
+ _assertZone(z1[2], l5[0], l5[1])
+ _assertZone(z1[3], l6[0], l6[1])
+ _assertZone(z1[4], l7[0], l7[1])
+ _assertZone(z2[0], l8[0], l8[1])
+
+ def test_availability_zone_detail_no_services(self):
+ expected_response = {'availabilityZoneInfo':
+ [{'zoneState': {'available': True},
+ 'hosts': {},
+ 'zoneName': 'nova'}]}
+ self.stubs.Set(availability_zones, 'get_availability_zones',
+ fake_get_availability_zones)
+ availabilityZone = self.availability_zone.AvailabilityZoneController()
+
+ req_url = self.url + '/detail'
+ req = webob.Request.blank(req_url)
+ req.method = 'GET'
+ req.environ['nova.context'] = context.get_admin_context()
+ resp_dict = availabilityZone.detail(req)
+
+ self.assertThat(resp_dict,
+ matchers.DictMatches(expected_response))
+
+
+class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
+ availability_zone = az_v2
+
+ def _get_wsgi_instance(self):
+ return fakes.wsgi_app()
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+ base_url = '/v2/fake/'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'availability_zone': 'nova',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
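+        # One controller is built with every extension loaded; a second one
+        # blacklists os-availability-zone and should therefore ignore the
+        # availability_zone parameter in create requests.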
+ self.controller = servers_v21.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ 'os-availability-zone',
+ 'osapi_v3')
+ self.no_availability_zone_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verify_no_availability_zone(self, **kwargs):
+ self.assertNotIn('availability_zone', kwargs)
+
+ def _test_create_extra(self, params, controller):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ server = controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_availability_zone_disabled(self):
+ params = {'availability_zone': 'foo'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self._verify_no_availability_zone(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params, self.no_availability_zone_controller)
+
+ def _create_instance_with_availability_zone(self, zone_name):
+ def create(*args, **kwargs):
+ self.assertIn('availability_zone', kwargs)
+ self.assertEqual('nova', kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'availability_zone': zone_name,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ admin_context = context.get_admin_context()
+ db.service_create(admin_context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(admin_context,
+ {'name': 'agg1'}, {'availability_zone': 'nova'})
+ db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
+ return req, body
+
+ def test_create_instance_with_availability_zone(self):
+ zone_name = 'nova'
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_invalid_availability_zone_too_long(self):
+ zone_name = 'a' * 256
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_with_invalid_availability_zone_too_short(self):
+ zone_name = ''
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_with_invalid_availability_zone_not_str(self):
+ zone_name = 111
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_without_availability_zone(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+
+ def _set_up_controller(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-availability-zone': 'fake'}
+ self.controller = servers_v2.Controller(ext_mgr)
+ ext_mgr_no_az = extensions.ExtensionManager()
+ ext_mgr_no_az.extensions = {}
+ self.no_availability_zone_controller = servers_v2.Controller(
+ ext_mgr_no_az)
+
+ def _verify_no_availability_zone(self, **kwargs):
+ self.assertIsNone(kwargs['availability_zone'])
+
+ def test_create_instance_with_invalid_availability_zone_too_long(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+ def test_create_instance_with_invalid_availability_zone_too_short(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+ def test_create_instance_with_invalid_availability_zone_not_str(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+
+class AvailabilityZoneSerializerTest(test.NoDBTestCase):
+ def test_availability_zone_index_detail_serializer(self):
+ def _verify_zone(zone_dict, tree):
+ self.assertEqual(tree.tag, 'availabilityZone')
+ self.assertEqual(zone_dict['zoneName'], tree.get('name'))
+ self.assertEqual(str(zone_dict['zoneState']['available']),
+ tree[0].get('available'))
+
+ for _idx, host_child in enumerate(tree[1]):
+ self.assertIn(host_child.get('name'), zone_dict['hosts'])
+ svcs = zone_dict['hosts'][host_child.get('name')]
+ for _idx, svc_child in enumerate(host_child[0]):
+ self.assertIn(svc_child.get('name'), svcs)
+ svc = svcs[svc_child.get('name')]
+ self.assertEqual(len(svc_child), 1)
+
+ self.assertEqual(str(svc['available']),
+ svc_child[0].get('available'))
+ self.assertEqual(str(svc['active']),
+ svc_child[0].get('active'))
+ self.assertEqual(str(svc['updated_at']),
+ svc_child[0].get('updated_at'))
+
+ serializer = az_v2.AvailabilityZonesTemplate()
+ raw_availability_zones = \
+ [{'zoneName': 'zone-1',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-compute': {'active': True, 'available': True,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}}}},
+ {'zoneName': 'internal',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-sched': {'active': True, 'available': True,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}},
+ 'fake_host-2': {
+ 'nova-network': {'active': True,
+ 'available': False,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 24)}}}},
+ {'zoneName': 'zone-2',
+ 'zoneState': {'available': False},
+ 'hosts': None}]
+
+ text = serializer.serialize(
+ dict(availabilityZoneInfo=raw_availability_zones))
+ tree = etree.fromstring(text)
+
+ self.assertEqual('availabilityZones', tree.tag)
+ self.assertEqual(len(raw_availability_zones), len(tree))
+ for idx, child in enumerate(tree):
+ _verify_zone(raw_availability_zones[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py
new file mode 100644
index 0000000000..451c92a40b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2013 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import baremetal_nodes as b_nodes_v2
+from nova.api.openstack.compute.plugins.v3 import baremetal_nodes \
+ as b_nodes_v21
+from nova.api.openstack import extensions
+from nova import context
+from nova import test
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+
+
+class FakeRequest(object):
+
+ def __init__(self, context):
+ self.environ = {"nova.context": context}
+
+
+def fake_node(**updates):
+ node = {
+ 'id': 1,
+ 'service_host': "host",
+ 'cpus': 8,
+ 'memory_mb': 8192,
+ 'local_gb': 128,
+ 'pm_address': "10.1.2.3",
+ 'pm_user': "pm_user",
+ 'pm_password': "pm_pass",
+ 'terminal_port': 8000,
+ 'interfaces': [],
+ 'instance_uuid': 'fake-instance-uuid',
+ }
+ if updates:
+ node.update(updates)
+ return node
+
+
+def fake_node_ext_status(**updates):
+ node = fake_node(uuid='fake-uuid',
+ task_state='fake-task-state',
+ updated_at='fake-updated-at',
+ pxe_config_path='fake-pxe-config-path')
+ if updates:
+ node.update(updates)
+ return node
+
+
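+# A single fake client is shared at module level so the mock.patch.object
+# class decorators below can substitute it for the real Ironic client.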
+FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
+
+
+@mock.patch.object(b_nodes_v21, '_get_ironic_client',
+ lambda *_: FAKE_IRONIC_CLIENT)
+class BareMetalNodesTestV21(test.NoDBTestCase):
+ def setUp(self):
+ super(BareMetalNodesTestV21, self).setUp()
+
+ self._setup()
+ self.context = context.get_admin_context()
+ self.request = FakeRequest(self.context)
+
+ def _setup(self):
+ self.controller = b_nodes_v21.BareMetalNodeController()
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
+ def test_index_ironic(self, mock_list):
+ properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
+ node = ironic_utils.get_test_node(properties=properties)
+ mock_list.return_value = [node]
+
+ res_dict = self.controller.index(self.request)
+ expected_output = {'nodes':
+ [{'memory_mb': properties['memory_mb'],
+ 'host': 'IRONIC MANAGED',
+ 'disk_gb': properties['local_gb'],
+ 'interfaces': [],
+ 'task_state': None,
+ 'id': node.uuid,
+ 'cpus': properties['cpus']}]}
+ self.assertEqual(expected_output, res_dict)
+ mock_list.assert_called_once_with(detail=True)
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
+ def test_show_ironic(self, mock_get, mock_list_ports):
+ properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
+ node = ironic_utils.get_test_node(properties=properties)
+ port = ironic_utils.get_test_port()
+ mock_get.return_value = node
+ mock_list_ports.return_value = [port]
+
+ res_dict = self.controller.show(self.request, node.uuid)
+ expected_output = {'node':
+ {'memory_mb': properties['memory_mb'],
+ 'instance_uuid': None,
+ 'host': 'IRONIC MANAGED',
+ 'disk_gb': properties['local_gb'],
+ 'interfaces': [{'address': port.address}],
+ 'task_state': None,
+ 'id': node.uuid,
+ 'cpus': properties['cpus']}}
+ self.assertEqual(expected_output, res_dict)
+ mock_get.assert_called_once_with(node.uuid)
+ mock_list_ports.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
+ def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
+ properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
+ node = ironic_utils.get_test_node(properties=properties)
+ mock_get.return_value = node
+ mock_list_ports.return_value = []
+
+ res_dict = self.controller.show(self.request, node.uuid)
+ self.assertEqual([], res_dict['node']['interfaces'])
+ mock_get.assert_called_once_with(node.uuid)
+ mock_list_ports.assert_called_once_with(node.uuid)
+
+ def test_create_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create,
+ self.request, {'node': object()})
+
+ def test_delete_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.delete,
+ self.request, 'fake-id')
+
+ def test_add_interface_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller._add_interface,
+ self.request, 'fake-id', 'fake-body')
+
+ def test_remove_interface_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller._remove_interface,
+ self.request, 'fake-id', 'fake-body')
+
+
+@mock.patch.object(b_nodes_v2, '_get_ironic_client',
+ lambda *_: FAKE_IRONIC_CLIENT)
+class BareMetalNodesTestV2(BareMetalNodesTestV21):
+ def _setup(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py
new file mode 100644
index 0000000000..ab20ad85c3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py
@@ -0,0 +1,359 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute import extensions
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
+from nova.api.openstack.compute import servers as servers_v2
+from nova import block_device
+from nova.compute import api as compute_api
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+
+CONF = cfg.CONF
+
+
+class BlockDeviceMappingTestV21(test.TestCase):
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v3.ServersController(extension_info=ext_info)
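+        # Temporarily blacklist the extension to build a controller without
+        # block_device_mapping support, then restore the default.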
+ CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
+ 'osapi_v3')
+ self.no_bdm_v2_controller = servers_v3.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', '', 'osapi_v3')
+
+ def setUp(self):
+ super(BlockDeviceMappingTestV21, self).setUp()
+ self._setup_controller()
+ fake.stub_out_image_service(self.stubs)
+
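+        # Default v2-style mapping (source_type/destination_type with a
+        # volume uuid) used as the request payload throughout these tests.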
+ self.bdm = [{
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'uuid': 'fake',
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ }]
+
+ def _get_servers_body(self, no_image=False):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ if no_image:
+ del body['server']['imageRef']
+ return body
+
+ def _test_create(self, params, no_image=False, override_controller=None):
+ body = self._get_servers_body(no_image)
+ body['server'].update(params)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ req.body = jsonutils.dumps(body)
+
+ if override_controller:
+ override_controller.create(req, body=body).obj['server']
+ else:
+ self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_block_device_mapping_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('block_device_mapping', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params,
+ override_controller=self.no_bdm_v2_controller)
+
+ def test_create_instance_with_volumes_enabled_no_image(self):
+ """Test that the create will fail if there is no image
+        and no bdms are supplied in the request.
+ """
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, {}, no_image=True)
+
+ def test_create_instance_with_bdms_and_no_image(self):
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertThat(
+ block_device.BlockDeviceDict(self.bdm[0]),
+ matchers.DictMatches(kwargs['block_device_mapping'][0])
+ )
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
+ self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
+
+ compute_api.API._validate_bdm(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(True)
+ compute_api.API._get_bdm_image_metadata(
+ mox.IgnoreArg(), mox.IgnoreArg(), False).AndReturn({})
+ self.mox.ReplayAll()
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_with_device_name_not_string(self):
+ self.bdm[0]['device_name'] = 123
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ def test_create_instance_with_device_name_empty(self):
+ self.bdm[0]['device_name'] = ''
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_device_name_too_long(self):
+ self.bdm[0]['device_name'] = 'a' * 256
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_space_in_device_name(self):
+ self.bdm[0]['device_name'] = 'v da'
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertTrue(kwargs['legacy_bdm'])
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_invalid_size(self):
+ self.bdm[0]['volume_size'] = 'hello world'
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_bdm(self):
+ bdm = [{
+ 'source_type': 'volume',
+ 'device_name': 'fake_dev',
+ 'uuid': 'fake_vol'
+ }]
+ bdm_expected = [{
+ 'source_type': 'volume',
+ 'device_name': 'fake_dev',
+ 'volume_id': 'fake_vol'
+ }]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertFalse(kwargs['legacy_bdm'])
+ for expected, received in zip(bdm_expected,
+ kwargs['block_device_mapping']):
+ self.assertThat(block_device.BlockDeviceDict(expected),
+ matchers.DictMatches(received))
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_bdm_missing_device_name(self):
+ del self.bdm[0]['device_name']
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertFalse(kwargs['legacy_bdm'])
+            self.assertIsNotNone(
+                kwargs['block_device_mapping'][0]['device_name'])
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_bdm_validation_error(self):
+ def _validate(*args, **kwargs):
+ raise exception.InvalidBDMFormat(details='Wrong BDM')
+
+ self.stubs.Set(block_device.BlockDeviceDict,
+ '_validate', _validate)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
+ self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
+ no_image=True)
+
+ def test_create_instance_bdm_api_validation_fails(self):
+ self.validation_fail_test_validate_called = False
+ self.validation_fail_instance_destroy_called = False
+
+ bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
+ (exception.InvalidBDMVolume, {'id': 'fake'}),
+ (exception.InvalidBDMImage, {'id': 'fake'}),
+ (exception.InvalidBDMBootSequence, {}),
+ (exception.InvalidBDMLocalsLimit, {}))
+
+ ex_iter = iter(bdm_exceptions)
+
+ def _validate_bdm(*args, **kwargs):
+ self.validation_fail_test_validate_called = True
+ ex, kargs = ex_iter.next()
+ raise ex(**kargs)
+
+ def _instance_destroy(*args, **kwargs):
+ self.validation_fail_instance_destroy_called = True
+
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
+
+ for _unused in xrange(len(bdm_exceptions)):
+ params = {block_device_mapping.ATTRIBUTE_NAME:
+ [self.bdm[0].copy()]}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+ self.assertTrue(self.validation_fail_test_validate_called)
+ self.assertTrue(self.validation_fail_instance_destroy_called)
+ self.validation_fail_test_validate_called = False
+ self.validation_fail_instance_destroy_called = False
+
+
+class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
+
+ def _setup_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {'os-volumes': 'fake',
+ 'os-block-device-mapping-v2-boot': 'fake'}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.ext_mgr_bdm_v2 = extensions.ExtensionManager()
+ self.ext_mgr_bdm_v2.extensions = {'os-volumes': 'fake'}
+ self.no_bdm_v2_controller = servers_v2.Controller(
+ self.ext_mgr_bdm_v2)
+
+ def test_create_instance_with_block_device_mapping_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+            self.assertIsNone(kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params,
+ override_controller=self.no_bdm_v2_controller)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py
new file mode 100644
index 0000000000..2f73f00952
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py
@@ -0,0 +1,421 @@
+# Copyright (c) 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute import extensions
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping_v1 as \
+ block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
+from nova.api.openstack.compute import servers as servers_v2
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+
+
+class BlockDeviceMappingTestV21(test.TestCase):
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
+ 'osapi_v3')
+ self.controller = servers_v3.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ ['os-block-device-mapping-v1',
+ 'os-block-device-mapping'],
+ 'osapi_v3')
+ self.no_volumes_controller = servers_v3.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', '', 'osapi_v3')
+
+ def setUp(self):
+ super(BlockDeviceMappingTestV21, self).setUp()
+ self._setup_controller()
+ fake.stub_out_image_service(self.stubs)
+ self.volume_id = fakes.FAKE_UUID
+ self.bdm = [{
+ 'id': 1,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'status': 'active',
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ 'volume_image_metadata':
+ {'test_key': 'test_value'}
+ }]
+
+ def _get_servers_body(self, no_image=False):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ if no_image:
+ del body['server']['imageRef']
+ return body
+
+ def _test_create(self, params, no_image=False, override_controller=None):
+ body = self._get_servers_body(no_image)
+ body['server'].update(params)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ req.body = jsonutils.dumps(body)
+
+ if override_controller:
+ override_controller.create(req, body=body).obj['server']
+ else:
+ self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_volumes_enabled(self):
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self._test_create(params)
+
+ def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
+ """Test that the create works if there is no image supplied but
+        the os-volumes extension is enabled and bdms are supplied.
+ """
+ self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
+ self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
+ volume = self.bdm[0]
+ compute_api.API._validate_bdm(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(True)
+ compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
+ self.bdm,
+ True).AndReturn(volume)
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.mox.ReplayAll()
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_with_volumes_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+            self.assertNotIn('block_device_mapping', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create(params,
+ override_controller=self.no_volumes_controller)
+
+ @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
+ bdm = [{
+ 'id': 1,
+ 'bootable': False,
+ 'volume_id': self.volume_id,
+ 'status': 'active',
+ 'device_name': 'vda',
+ }]
+ params = {'block_device_mapping': bdm}
+ fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+    def test_create_instance_with_device_name_not_string(self):
+        self.bdm[0]['device_name'] = 123
+        old_create = compute_api.API.create
+        self.params = {'block_device_mapping': self.bdm}
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ def test_create_instance_with_device_name_empty(self):
+ self.bdm[0]['device_name'] = ''
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_device_name_too_long(self):
+        self.bdm[0]['device_name'] = 'a' * 256
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_space_in_device_name(self):
+        self.bdm[0]['device_name'] = 'vd a'
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertTrue(kwargs['legacy_bdm'])
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_invalid_size(self):
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': 'vda',
+ 'volume_size': "hello world",
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_bdm_delete_on_termination(self):
+ bdm = [{'device_name': 'foo1', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1},
+ {'device_name': 'foo2', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo3', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 'invalid'},
+ {'device_name': 'foo4', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 0},
+ {'device_name': 'foo5', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False}]
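+        # The API is expected to coerce delete_on_termination to a strict
+        # boolean: 1/True become True, while 'invalid', 0 and False become
+        # False.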
+ expected_bdm = [
+ {'device_name': 'foo1', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo2', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo3', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False},
+ {'device_name': 'foo4', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False},
+ {'device_name': 'foo5', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self._test_create(params)
+
+ def test_create_instance_decide_format_legacy(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist',
+ ['os-block-device-mapping',
+ 'os-block-device-mapping-v1'],
+ 'osapi_v3')
+ controller = servers_v3.ServersController(extension_info=ext_info)
+ bdm = [{'device_name': 'foo1',
+ 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1}]
+
+ expected_legacy_flag = True
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ legacy_bdm = kwargs.get('legacy_bdm', True)
+ self.assertEqual(legacy_bdm, expected_legacy_flag)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm',
+ _validate_bdm)
+
+ self._test_create({}, override_controller=controller)
+
+ params = {'block_device_mapping': bdm}
+ self._test_create(params, override_controller=controller)
+
+ def test_create_instance_both_bdm_formats(self):
+ bdm = [{'device_name': 'foo'}]
+ bdm_v2 = [{'source_type': 'volume',
+ 'uuid': 'fake_vol'}]
+ params = {'block_device_mapping': bdm,
+ 'block_device_mapping_v2': bdm_v2}
+ self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+
+
+class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
+
+ def _setup_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.ext_mgr_no_vols = extensions.ExtensionManager()
+ self.ext_mgr_no_vols.extensions = {}
+ self.no_volumes_controller = servers_v2.Controller(
+ self.ext_mgr_no_vols)
+
+ def test_create_instance_with_volumes_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create(params,
+ override_controller=self.no_volumes_controller)
+
+ def test_create_instance_decide_format_legacy(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-volumes': 'fake',
+ 'os-block-device-mapping-v2-boot': 'fake'}
+        controller = servers_v2.Controller(ext_mgr)
+ bdm = [{'device_name': 'foo1',
+ 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1}]
+
+ expected_legacy_flag = True
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ legacy_bdm = kwargs.get('legacy_bdm', True)
+ self.assertEqual(legacy_bdm, expected_legacy_flag)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm',
+ _validate_bdm)
+
+ self._test_create({}, override_controller=controller)
+
+ params = {'block_device_mapping': bdm}
+ self._test_create(params, override_controller=controller)
+
+
+class TestServerCreateRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerCreateRequestXMLDeserializer, self).setUp()
+ self.deserializer = servers_v2.CreateDeserializer()
+
+ def test_request_with_block_device_mapping(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <block_device_mapping>
+ <mapping volume_id="7329b667-50c7-46a6-b913-cb2a09dfeee0"
+ device_name="/dev/vda" virtual_name="root"
+ delete_on_termination="False" />
+ <mapping snapshot_id="f31efb24-34d2-43e1-8b44-316052956a39"
+ device_name="/dev/vdb" virtual_name="ephemeral0"
+ delete_on_termination="False" />
+ <mapping device_name="/dev/vdc" no_device="True" />
+ </block_device_mapping>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "block_device_mapping": [
+ {
+ "volume_id": "7329b667-50c7-46a6-b913-cb2a09dfeee0",
+ "device_name": "/dev/vda",
+ "virtual_name": "root",
+ "delete_on_termination": False,
+ },
+ {
+ "snapshot_id": "f31efb24-34d2-43e1-8b44-316052956a39",
+ "device_name": "/dev/vdb",
+ "virtual_name": "ephemeral0",
+ "delete_on_termination": False,
+ },
+ {
+ "device_name": "/dev/vdc",
+ "no_device": True,
+ },
+ ]
+ }}
+ self.assertEqual(request['body'], expected)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cells.py b/nova/tests/unit/api/openstack/compute/contrib/test_cells.py
new file mode 100644
index 0000000000..1460d33e3a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cells.py
@@ -0,0 +1,698 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+from oslo.utils import timeutils
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cells as cells_ext_v2
+from nova.api.openstack.compute.plugins.v3 import cells as cells_ext_v21
+from nova.api.openstack import extensions
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova import context
+from nova import exception
+from nova import rpc
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+
+class BaseCellsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(BaseCellsTest, self).setUp()
+
+ self.fake_cells = [
+ dict(id=1, name='cell1', is_parent=True,
+ weight_scale=1.0, weight_offset=0.0,
+ transport_url='rabbit://bob:xxxx@r1.example.org/'),
+ dict(id=2, name='cell2', is_parent=False,
+ weight_scale=1.0, weight_offset=0.0,
+ transport_url='rabbit://alice:qwerty@r2.example.org/')]
+
+ self.fake_capabilities = [
+ {'cap1': '0,1', 'cap2': '2,3'},
+ {'cap3': '4,5', 'cap4': '5,6'}]
+
+ def fake_cell_get(_self, context, cell_name):
+ for cell in self.fake_cells:
+ if cell_name == cell['name']:
+ return cell
+ else:
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ def fake_cell_create(_self, context, values):
+ cell = dict(id=1)
+ cell.update(values)
+ return cell
+
+ def fake_cell_update(_self, context, cell_id, values):
+ cell = fake_cell_get(_self, context, cell_id)
+ cell.update(values)
+ return cell
+
+ def fake_cells_api_get_all_cell_info(*args):
+ return self._get_all_cell_info(*args)
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', fake_cell_get)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_update', fake_cell_update)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_create', fake_cell_create)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
+ fake_cells_api_get_all_cell_info)
+
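+    # Mimics get_cell_info_for_neighbors: returns the fake cells with
+    # credentials stripped from their transport URLs and capabilities
+    # attached.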
+ def _get_all_cell_info(self, *args):
+ def insecure_transport_url(url):
+ transport_url = rpc.get_transport_url(url)
+ transport_url.hosts[0].password = None
+ return str(transport_url)
+
+ cells = copy.deepcopy(self.fake_cells)
+ cells[0]['transport_url'] = insecure_transport_url(
+ cells[0]['transport_url'])
+ cells[1]['transport_url'] = insecure_transport_url(
+ cells[1]['transport_url'])
+ for i, cell in enumerate(cells):
+ cell['capabilities'] = self.fake_capabilities[i]
+ return cells
+
+
+class CellsTestV21(BaseCellsTest):
+ cell_extension = 'compute_extension:v3:os-cells'
+ bad_request = exception.ValidationError
+
+ def _get_cell_controller(self, ext_mgr):
+ return cells_ext_v21.CellsController()
+
+ def _get_request(self, resource):
+ return fakes.HTTPRequest.blank('/v2/fake/' + resource)
+
+ def setUp(self):
+ super(CellsTestV21, self).setUp()
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = self._get_cell_controller(self.ext_mgr)
+ self.context = context.get_admin_context()
+ self.flags(enable=True, group='cells')
+
+ def test_index(self):
+ req = self._get_request("cells")
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], self.fake_cells[i]['name'])
+            self.assertNotIn('capabilities', cell)
+ self.assertNotIn('password', cell)
+
+ def test_detail(self):
+ req = self._get_request("cells/detail")
+ res_dict = self.controller.detail(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], self.fake_cells[i]['name'])
+ self.assertEqual(cell['capabilities'], self.fake_capabilities[i])
+ self.assertNotIn('password', cell)
+
+ def test_show_bogus_cell_raises(self):
+ req = self._get_request("cells/bogus")
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
+
+ def test_get_cell_by_name(self):
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.show(req, 'cell1')
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertNotIn('password', cell)
+
+ def _cell_delete(self):
+ call_info = {'delete_called': 0}
+
+ def fake_cell_delete(inst, context, cell_name):
+ self.assertEqual(cell_name, 'cell999')
+ call_info['delete_called'] += 1
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.controller.delete(req, 'cell999')
+ self.assertEqual(call_info['delete_called'], 1)
+
+ def test_cell_delete(self):
+ # Test cell delete with just cell policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_delete()
+
+ def test_cell_delete_with_delete_policy(self):
+ self._cell_delete()
+
+ def test_delete_bogus_cell_raises(self):
+ def fake_cell_delete(inst, context, cell_name):
+ return 0
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
+ 'cell999')
+
+ def test_cell_delete_fails_for_invalid_policy(self):
+ def fake_cell_delete(inst, context, cell_name):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete, req, 'cell999')
+
+ def _cell_create_parent(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'parent')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_parent(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_parent()
+
+ def test_cell_create_parent_with_create_policy(self):
+ self._cell_create_parent()
+
+ def _cell_create_child(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'child'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'child')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_child(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_child()
+
+ def test_cell_create_child_with_create_policy(self):
+ self._cell_create_child()
+
+ def test_cell_create_no_name_raises(self):
+ body = {'cell': {'username': 'moocow',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_empty_string_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_with_bang_raises(self):
+ body = {'cell': {'name': 'moo!cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_with_dot_raises(self):
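+        # Despite the name, v2.1 accepts dots in cell names; the v2 subclass
+        # below overrides this test to expect a 400 instead.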
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+ self.assertEqual(cell['name'], 'moo.cow')
+
+ def test_cell_create_name_with_invalid_type_raises(self):
+ body = {'cell': {'name': 'moocow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'invalid'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'fake'}}
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body=body)
+
+ def _cell_update(self):
+ body = {'cell': {'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell['username'], 'zeb')
+ self.assertNotIn('password', cell)
+
+ def test_cell_update(self):
+ # Test cell update with just cell policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_update()
+
+ def test_cell_update_with_update_policy(self):
+ self._cell_update()
+
+ def test_cell_update_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'got_changed'}}
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.controller.update, req, 'cell1', body=body)
+
+ def test_cell_update_empty_name_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.update, req, 'cell1', body=body)
+
+ def test_cell_update_invalid_type_raises(self):
+ body = {'cell': {'username': 'zeb',
+ 'type': 'invalid',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.update, req, 'cell1', body=body)
+
+ def test_cell_update_without_type_specified(self):
+ body = {'cell': {'username': 'wingwj'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell['username'], 'wingwj')
+ self.assertEqual(cell['type'], 'parent')
+
+ def test_cell_update_with_type_specified(self):
+ body1 = {'cell': {'username': 'wingwj', 'type': 'child'}}
+ body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}}
+
+ req1 = self._get_request("cells/cell1")
+ req1.environ['nova.context'] = self.context
+ res_dict1 = self.controller.update(req1, 'cell1', body=body1)
+ cell1 = res_dict1['cell']
+
+ req2 = self._get_request("cells/cell2")
+ req2.environ['nova.context'] = self.context
+ res_dict2 = self.controller.update(req2, 'cell2', body=body2)
+ cell2 = res_dict2['cell']
+
+ self.assertEqual(cell1['name'], 'cell1')
+ self.assertEqual(cell1['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell1['username'], 'wingwj')
+ self.assertEqual(cell1['type'], 'child')
+
+ self.assertEqual(cell2['name'], 'cell2')
+ self.assertEqual(cell2['rpc_host'], 'r2.example.org')
+ self.assertEqual(cell2['username'], 'wingwj')
+ self.assertEqual(cell2['type'], 'parent')
+
+ def test_cell_info(self):
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(name='darksecret', capabilities=caps, group='cells')
+
+ req = self._get_request("cells/info")
+ res_dict = self.controller.info(req)
+ cell = res_dict['cell']
+ cell_caps = cell['capabilities']
+
+ self.assertEqual(cell['name'], 'darksecret')
+ self.assertEqual(cell_caps['cap1'], 'a;b')
+ self.assertEqual(cell_caps['cap2'], 'c;d')
+
+ def test_show_capacities(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name=None).AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req)
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_show_capacity_fails_with_non_admin_context(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ rules = {self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.capacities, req)
+
+ def test_show_capacities_for_invalid_cell(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ self.controller.cells_rpcapi. \
+ get_capacities(self.context, cell_name="invalid_cell").AndRaise(
+ exception.CellNotFound(cell_name="invalid_cell"))
+ self.mox.ReplayAll()
+ req = self._get_request("cells/invalid_cell/capacities")
+ req.environ["nova.context"] = self.context
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.capacities, req, "invalid_cell")
+
+ def test_show_capacities_for_cell(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name='cell_name').\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req, 'cell_name')
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_sync_instances(self):
+ call_info = {}
+
+ def sync_instances(self, context, **kwargs):
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+ call_info['deleted'] = kwargs.get('deleted')
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ body = {}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+
+ body = {'project_id': 'test-project'}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], 'test-project')
+ self.assertIsNone(call_info['updated_since'])
+
+ expected = timeutils.utcnow().isoformat()
+ if not expected.endswith("+00:00"):
+ expected += "+00:00"
+
+ body = {'updated_since': expected}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertEqual(call_info['updated_since'], expected)
+
+ body = {'updated_since': 'skjdfkjsdkf'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'deleted': False}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], False)
+
+ body = {'deleted': 'False'}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], False)
+
+ body = {'deleted': 'True'}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], True)
+
+ body = {'deleted': 'foo'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'foo': 'meow'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ def test_sync_instances_fails_for_invalid_policy(self):
+ def sync_instances(self, context, **kwargs):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+
+ body = {}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.sync_instances, req, body=body)
+
+ def test_cells_disabled(self):
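+        # With the cells subsystem disabled, every cells endpoint should
+        # return 501 Not Implemented.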
+ self.flags(enable=False, group='cells')
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.index, req)
+
+ req = self._get_request("cells/detail")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.detail, req)
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.show, req)
+
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.delete, req, 'cell999')
+
+ req = self._get_request("cells/cells")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.create, req, {})
+
+ req = self._get_request("cells/capacities")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.capacities, req)
+
+ req = self._get_request("cells/sync_instances")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.sync_instances, req, {})
+
+
+class CellsTestV2(CellsTestV21):
+ cell_extension = 'compute_extension:cells'
+ bad_request = exc.HTTPBadRequest
+
+ def _get_cell_controller(self, ext_mgr):
+ return cells_ext_v2.Controller(ext_mgr)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body=body)
+
+
+class TestCellsXMLSerializer(BaseCellsTest):
+ def test_multiple_cells(self):
+ fixture = {'cells': self._get_all_cell_info()}
+
+ serializer = cells_ext_v2.CellsTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+
+ def test_single_cell_with_caps(self):
+ cell = {'id': 1,
+ 'name': 'darksecret',
+ 'username': 'meow',
+ 'capabilities': {'cap1': 'a;b',
+ 'cap2': 'c;d'}}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext_v2.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'meow')
+ self.assertIsNone(res_tree.get('password'))
+ self.assertEqual(len(res_tree), 1)
+
+ child = res_tree[0]
+ self.assertEqual(child.tag,
+ '{%s}capabilities' % xmlutil.XMLNS_V10)
+ for elem in child:
+ self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10))
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
+
+ def test_single_cell_without_caps(self):
+ cell = {'id': 1,
+ 'username': 'woof',
+ 'name': 'darksecret'}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext_v2.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'woof')
+ self.assertIsNone(res_tree.get('password'))
+ self.assertEqual(len(res_tree), 0)
+
+
+class TestCellsXMLDeserializer(test.NoDBTestCase):
+ def test_cell_deserializer(self):
+ caps_dict = {'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ caps_xml = ("<capabilities><cap1>a;b</cap1>"
+ "<cap2>c;d</cap2></capabilities>")
+ expected = {'cell': {'name': 'testcell1',
+ 'type': 'child',
+ 'rpc_host': 'localhost',
+ 'capabilities': caps_dict}}
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ "<cell><name>testcell1</name><type>child</type>"
+ "<rpc_host>localhost</rpc_host>"
+ "%s</cell>") % caps_xml
+ deserializer = cells_ext_v2.CellDeserializer()
+ result = deserializer.deserialize(intext)
+ self.assertEqual(dict(body=expected), result)
+
+ def test_with_corrupt_xml(self):
+ deserializer = cells_ext_v2.CellDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py b/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py
new file mode 100644
index 0000000000..c7066516d8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import mock
+import mox
+from webob import exc
+
+from nova.api.openstack.compute.contrib import certificates as certificates_v2
+from nova.api.openstack.compute.plugins.v3 import certificates \
+ as certificates_v21
+from nova.cert import rpcapi
+from nova import context
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class CertificatesTestV21(test.NoDBTestCase):
+ certificates = certificates_v21
+ url = '/v3/os-certificates'
+ certificate_show_extension = 'compute_extension:v3:os-certificates:show'
+ certificate_create_extension = \
+ 'compute_extension:v3:os-certificates:create'
+
+ def setUp(self):
+ super(CertificatesTestV21, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.controller = self.certificates.CertificatesController()
+
+ def test_translate_certificate_view(self):
+ pk, cert = 'fakepk', 'fakecert'
+ view = self.certificates._translate_certificate_view(cert, pk)
+ self.assertEqual(view['data'], cert)
+ self.assertEqual(view['private_key'], pk)
+
+ def test_certificates_show_root(self):
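+        # Showing the 'root' certificate returns the CA certificate with
+        # no private key.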
+ self.mox.StubOutWithMock(self.controller.cert_rpcapi, 'fetch_ca')
+
+ self.controller.cert_rpcapi.fetch_ca(
+ mox.IgnoreArg(), project_id='fake').AndReturn('fakeroot')
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ res_dict = self.controller.show(req, 'root')
+
+ response = {'certificate': {'data': 'fakeroot', 'private_key': None}}
+ self.assertEqual(res_dict, response)
+
+ def test_certificates_show_policy_failed(self):
+ rules = {
+ self.certificate_show_extension:
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, 'root')
+ self.assertIn(self.certificate_show_extension,
+ exc.format_message())
+
+ def test_certificates_create_certificate(self):
+ self.mox.StubOutWithMock(self.controller.cert_rpcapi,
+ 'generate_x509_cert')
+
+ self.controller.cert_rpcapi.generate_x509_cert(
+ mox.IgnoreArg(),
+ user_id='fake_user',
+ project_id='fake').AndReturn(('fakepk', 'fakecert'))
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req)
+
+ response = {
+ 'certificate': {'data': 'fakecert',
+ 'private_key': 'fakepk'}
+ }
+ self.assertEqual(res_dict, response)
+
+ def test_certificates_create_policy_failed(self):
+ rules = {
+ self.certificate_create_extension:
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.url)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req)
+ self.assertIn(self.certificate_create_extension,
+ exc.format_message())
+
+ @mock.patch.object(rpcapi.CertAPI, 'fetch_ca',
+ side_effect=exception.CryptoCAFileNotFound(project='fake'))
+ def test_non_exist_certificates_show(self, mock_fetch_ca):
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ self.assertRaises(
+ exc.HTTPNotFound,
+ self.controller.show,
+ req, 'root')
+
+
+class CertificatesTestV2(CertificatesTestV21):
+ certificates = certificates_v2
+ url = '/v2/fake/os-certificates'
+ certificate_show_extension = 'compute_extension:certificates'
+ certificate_create_extension = 'compute_extension:certificates'
+
+
+class CertificatesSerializerTest(test.NoDBTestCase):
+ def test_index_serializer(self):
+ serializer = certificates_v2.CertificateTemplate()
+ text = serializer.serialize(dict(
+ certificate=dict(
+ data='fakecert',
+ private_key='fakepk'),
+ ))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('certificate', tree.tag)
+ self.assertEqual('fakepk', tree.get('private_key'))
+ self.assertEqual('fakecert', tree.get('data'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py
new file mode 100644
index 0000000000..ab3b1a58cc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py
@@ -0,0 +1,210 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid as uuid_lib
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.utils import timeutils
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cloudpipe as cloudpipe_v2
+from nova.api.openstack.compute.plugins.v3 import cloudpipe as cloudpipe_v21
+from nova.api.openstack import wsgi
+from nova.compute import utils as compute_utils
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+from nova.tests.unit import matchers
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+
+
+project_id = str(uuid_lib.uuid4().hex)
+uuid = str(uuid_lib.uuid4())
+
+
+def fake_vpn_instance():
+ return {
+ 'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
+ 'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
+ 'uuid': uuid, 'project_id': project_id,
+ }
+
+
+def compute_api_get_all_empty(context, search_opts=None):
+ return []
+
+
+def compute_api_get_all(context, search_opts=None):
+ return [fake_vpn_instance()]
+
+
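+# Stub for utils.vpn_ping that always reports the VPN endpoint as
+# reachable; the listing test below expects a 'running' state.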
+def utils_vpn_ping(addr, port, timeout=0.05, session_id=None):
+ return True
+
+
+class CloudpipeTestV21(test.NoDBTestCase):
+ cloudpipe = cloudpipe_v21
+ url = '/v2/fake/os-cloudpipe'
+
+ def setUp(self):
+ super(CloudpipeTestV21, self).setUp()
+ self.controller = self.cloudpipe.CloudpipeController()
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all_empty)
+ self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
+
+ def test_cloudpipe_list_no_network(self):
+
+ def fake_get_nw_info_for_instance(instance):
+ return {}
+
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ fake_get_nw_info_for_instance)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req)
+ response = {'cloudpipes': [{'project_id': project_id,
+ 'instance_id': uuid,
+ 'created_at': '1981-10-20T00:00:00Z'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_list(self):
+
+ def network_api_get(context, network_id):
+ self.assertEqual(context.project_id, project_id)
+ return {'vpn_public_address': '127.0.0.1',
+ 'vpn_public_port': 22}
+
+ def fake_get_nw_info_for_instance(instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs)
+
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ fake_get_nw_info_for_instance)
+ self.stubs.Set(self.controller.network_api, "get",
+ network_api_get)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req)
+ response = {'cloudpipes': [{'project_id': project_id,
+ 'internal_ip': '192.168.1.100',
+ 'public_ip': '127.0.0.1',
+ 'public_port': 22,
+ 'state': 'running',
+ 'instance_id': uuid,
+ 'created_at': '1981-10-20T00:00:00Z'}]}
+ self.assertThat(res_dict, matchers.DictMatches(response))
+
+ def test_cloudpipe_create(self):
+ def launch_vpn_instance(context):
+ return ([fake_vpn_instance()], 'fake-reservation')
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+
+ response = {'instance_id': uuid}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_create_no_networks(self):
+ def launch_vpn_instance(context):
+ raise exception.NoMoreNetworks
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body=body)
+
+ def test_cloudpipe_create_already_running(self):
+ def launch_vpn_instance(*args, **kwargs):
+ self.fail("Method should not have been called")
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {'instance_id': uuid}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_create_with_bad_project_id_failed(self):
+ body = {'cloudpipe': {'project_id': 'bad.project.id'}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+
+class CloudpipeTestV2(CloudpipeTestV21):
+ cloudpipe = cloudpipe_v2
+
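+    # The v2 API does not validate the project_id format, so this v2.1
+    # check is skipped.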
+ def test_cloudpipe_create_with_bad_project_id_failed(self):
+ pass
+
+
+class CloudpipesXMLSerializerTestV2(test.NoDBTestCase):
+ def test_default_serializer(self):
+ serializer = cloudpipe_v2.CloudpipeTemplate()
+ exemplar = dict(cloudpipe=dict(instance_id='1234-1234-1234-1234'))
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+ self.assertEqual('cloudpipe', tree.tag)
+ for child in tree:
+ self.assertIn(child.tag, exemplar['cloudpipe'])
+ self.assertEqual(child.text, exemplar['cloudpipe'][child.tag])
+
+ def test_index_serializer(self):
+ serializer = cloudpipe_v2.CloudpipesTemplate()
+ exemplar = dict(cloudpipes=[
+ dict(
+ project_id='1234',
+ public_ip='1.2.3.4',
+ public_port='321',
+ instance_id='1234-1234-1234-1234',
+ created_at=timeutils.isotime(),
+ state='running'),
+ dict(
+ project_id='4321',
+ public_ip='4.3.2.1',
+ public_port='123',
+ state='pending')])
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+ self.assertEqual('cloudpipes', tree.tag)
+ self.assertEqual(len(exemplar['cloudpipes']), len(tree))
+ for idx, cl_pipe in enumerate(tree):
+ kp_data = exemplar['cloudpipes'][idx]
+ for child in cl_pipe:
+ self.assertIn(child.tag, kp_data)
+ self.assertEqual(child.text, kp_data[child.tag])
+
+ def test_deserializer(self):
+ deserializer = wsgi.XMLDeserializer()
+ exemplar = dict(cloudpipe=dict(project_id='4321'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<cloudpipe><project_id>4321</project_id></cloudpipe>')
+ result = deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py
new file mode 100644
index 0000000000..23faf6275a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py
@@ -0,0 +1,99 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import cloudpipe_update as clup_v2
+from nova.api.openstack.compute.plugins.v3 import cloudpipe as clup_v21
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+
+
+fake_networks = [fake_network.fake_network(1),
+ fake_network.fake_network(2)]
+
+
+def fake_project_get_networks(context, project_id, associate=True):
+ return fake_networks
+
+
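+# Stub for db.network_update that applies the update to the in-memory
+# fake_networks list so the tests can assert on the stored values.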
+def fake_network_update(context, network_id, values):
+ for network in fake_networks:
+ if network['id'] == network_id:
+ for key in values:
+ network[key] = values[key]
+
+
+class CloudpipeUpdateTestV21(test.NoDBTestCase):
+ bad_request = exception.ValidationError
+
+ def setUp(self):
+ super(CloudpipeUpdateTestV21, self).setUp()
+ self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
+ self.stubs.Set(db, "network_update", fake_network_update)
+ self._setup()
+
+ def _setup(self):
+ self.controller = clup_v21.CloudpipeController()
+
+    def _check_status(self, expected_status, res, controller_method):
+        self.assertEqual(expected_status, controller_method.wsgi_code)
+
+ def test_cloudpipe_configure_project(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
+ result = self.controller.update(req, 'configure-project',
+ body=body)
+ self._check_status(202, result, self.controller.update)
+ self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
+ self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
+
+ def test_cloudpipe_configure_project_bad_url(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-projectx')
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req,
+ 'configure-projectx', body=body)
+
+ def test_cloudpipe_configure_project_bad_data(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
+ self.assertRaises(self.bad_request,
+ self.controller.update, req,
+ 'configure-project', body=body)
+
+ def test_cloudpipe_configure_project_bad_vpn_port(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
+ "vpn_port": "foo"}}
+ self.assertRaises(self.bad_request,
+ self.controller.update, req,
+ 'configure-project', body=body)
+
+
+class CloudpipeUpdateTestV2(CloudpipeUpdateTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _setup(self):
+ self.controller = clup_v2.CloudpipeUpdateController()
+
+    def _check_status(self, expected_status, res, controller_method):
+        self.assertEqual(expected_status, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py
new file mode 100644
index 0000000000..ef94db0d23
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py
@@ -0,0 +1,260 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import config_drive as config_drive_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import config_drive \
+ as config_drive_v21
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+CONF = cfg.CONF
+
+
+class ConfigDriveTestV21(test.TestCase):
+ base_url = '/v2/fake/servers/'
+
+ def _setup_wsgi(self):
+ self.app = fakes.wsgi_app_v21(init_only=('servers', 'os-config-drive'))
+
+ def _get_config_drive_controller(self):
+ return config_drive_v21.ConfigDriveController()
+
+ def setUp(self):
+ super(ConfigDriveTestV21, self).setUp()
+ self.Controller = self._get_config_drive_controller()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self._setup_wsgi()
+
+ def test_show(self):
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + '1')
+ req.headers['Content-Type'] = 'application/json'
+ response = req.get_response(self.app)
+ self.assertEqual(response.status_int, 200)
+ res_dict = jsonutils.loads(response.body)
+ self.assertIn('config_drive', res_dict['server'])
+
+ def test_detail_servers(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fakes.fake_instance_get_all_by_filters())
+ req = fakes.HTTPRequest.blank(self.base_url + 'detail')
+ res = req.get_response(self.app)
+ server_dicts = jsonutils.loads(res.body)['servers']
+ self.assertNotEqual(len(server_dicts), 0)
+ for server_dict in server_dicts:
+ self.assertIn('config_drive', server_dict)
+
+
+class ConfigDriveTestV2(ConfigDriveTestV21):
+
+ def _get_config_drive_controller(self):
+ return config_drive_v2.Controller()
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Config_drive'])
+ self.app = fakes.wsgi_app(init_only=('servers',))
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+ base_url = '/v2/fake/'
+ bad_request = exception.ValidationError
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v21.ServersController(
+ extension_info=ext_info)
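+        # Build a second controller with os-config-drive blacklisted to
+        # check that the config_drive parameter is not passed through to
+        # the compute API.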
+ CONF.set_override('extensions_blacklist',
+ 'os-config-drive',
+ 'osapi_v3')
+ self.no_config_drive_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+    def _verify_config_drive(self, **kwargs):
+        self.assertNotIn('config_drive', kwargs)
+
+ def _initialize_extension(self):
+ pass
+
+ def setUp(self):
+        """Shared implementation for tests below that create instances."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+ self._set_up_controller()
+
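+        # Stub db.instance_create to return a canned fake instance record.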
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': fakes.FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _test_create_extra(self, params, override_controller):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller is not None:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_config_drive_disabled(self):
+ params = {'config_drive': "False"}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+            self._verify_config_drive(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params,
+ override_controller=self.no_config_drive_controller)
+
+ def _create_instance_body_of_config_drive(self, param):
+ self._initialize_extension()
+
+ def create(*args, **kwargs):
+ self.assertIn('config_drive', kwargs)
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'config_drive': param,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ return req, body
+
+ def test_create_instance_with_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_config_drive_as_boolean_string(self):
+ param = 'false'
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_config_drive(self):
+ param = 12345
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_without_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ del body['server']['config_drive']
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.no_config_drive_controller = None
+
+    def _verify_config_drive(self, **kwargs):
+        self.assertIsNone(kwargs['config_drive'])
+
+ def _initialize_extension(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py
new file mode 100644
index 0000000000..eef4cd62ea
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py
@@ -0,0 +1,103 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.consoleauth import rpcapi as consoleauth_rpcapi
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path': 'fake_access_path',
+ 'console_type': 'rdp-html5'}
+
+
+def _fake_check_token(self, context, token):
+ return _FAKE_CONNECT_INFO
+
+
+def _fake_check_token_not_found(self, context, token):
+ return None
+
+
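+# Return connect info for a console type the extension does not allow.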
+def _fake_check_token_unauthorized(self, context, token):
+    # Use a copy so the shared fixture is not mutated for later tests.
+    connect_info = dict(_FAKE_CONNECT_INFO)
+    connect_info['console_type'] = 'unauthorized_console_type'
+    return connect_info
+
+
+class ConsoleAuthTokensExtensionTest(test.TestCase):
+
+ _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
+
+ _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path':
+ 'fake_access_path'}}
+
+ def setUp(self):
+ super(ConsoleAuthTokensExtensionTest, self).setUp()
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Console_auth_tokens'])
+
+ ctxt = self._get_admin_context()
+ self.app = fakes.wsgi_app(init_only=('os-console-auth-tokens',),
+ fake_auth_context=ctxt)
+
+ def _get_admin_context(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ return ctxt
+
+ def _create_request(self):
+ req = webob.Request.blank(self._FAKE_URL)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ return req
+
+ def test_get_console_connect_info(self):
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_int)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(self._EXPECTED_OUTPUT, output)
+
+ def test_get_console_connect_info_token_not_found(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_not_found)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(404, res.status_int)
+
+ def test_get_console_connect_info_unauthorized_console_type(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_unauthorized)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(401, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py b/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py
new file mode 100644
index 0000000000..441899a19b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py
@@ -0,0 +1,171 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import string
+
+from oslo.serialization import jsonutils
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
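+# Fake console output: five numbered lines, optionally truncated to the
+# requested tail_length.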
+def fake_get_console_output(self, _context, _instance, tail_length):
+ fixture = [str(i) for i in range(5)]
+
+ if tail_length is None:
+ pass
+ elif tail_length == 0:
+ fixture = []
+ else:
+ fixture = fixture[-int(tail_length):]
+
+ return '\n'.join(fixture)
+
+
+def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
+ raise exception.InstanceNotReady(instance_id=_instance["uuid"])
+
+
+def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
+ return string.printable
+
+
+def fake_get(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
+
+
+def fake_get_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+
+class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
+ application_type = "application/json"
+ action_url = '/v2/fake/servers/1/action'
+
+ def setUp(self):
+ super(ConsoleOutputExtensionTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers',
+ 'os-console-output'))
+
+ def _get_response(self, length_dict=None):
+ length_dict = length_dict or {}
+ body = {'os-getConsoleOutput': length_dict}
+ req = fakes.HTTPRequest.blank(self.action_url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = self.application_type
+ res = req.get_response(self.app)
+ return res
+
+ def test_get_text_console_instance_action(self):
+ res = self._get_response()
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
+
+ def test_get_console_output_with_tail(self):
+ res = self._get_response(length_dict={'length': 3})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '2\n3\n4'}, output)
+
+ def test_get_console_output_with_none_length(self):
+ res = self._get_response(length_dict={'length': None})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
+
+ def test_get_console_output_with_length_as_str(self):
+ res = self._get_response(length_dict={'length': '3'})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '2\n3\n4'}, output)
+
+ def test_get_console_output_filtered_characters(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_all_characters)
+ res = self._get_response()
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ expect = string.digits + string.letters + string.punctuation + ' \t\n'
+ self.assertEqual({'output': expect}, output)
+
+ def test_get_text_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ res = self._get_response()
+ self.assertEqual(404, res.status_int)
+
+ def test_get_text_console_no_instance_on_get_output(self):
+ self.stubs.Set(compute_api.API,
+ 'get_console_output',
+ fake_get_not_found)
+ res = self._get_response()
+ self.assertEqual(404, res.status_int)
+
+ def _get_console_output_bad_request_case(self, body):
+ req = fakes.HTTPRequest.blank(self.action_url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_console_output_with_non_integer_length(self):
+ body = {'os-getConsoleOutput': {'length': 'NaN'}}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_text_console_bad_body(self):
+ body = {}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_console_output_with_length_as_float(self):
+ body = {'os-getConsoleOutput': {'length': 2.5}}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_console_output_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_not_ready)
+ res = self._get_response(length_dict={'length': 3})
+ self.assertEqual(409, res.status_int)
+
+ def test_not_implemented(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fakes.fake_not_implemented)
+ res = self._get_response()
+ self.assertEqual(501, res.status_int)
+
+ def test_get_console_output_with_boolean_length(self):
+ res = self._get_response(length_dict={'length': True})
+ self.assertEqual(400, res.status_int)
+
+
+class ConsoleOutputExtensionTestV2(ConsoleOutputExtensionTestV21):
+ need_osapi_compute_extension = True
+
+ def _get_app(self):
+ self.flags(osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Console_output'])
+ return fakes.wsgi_app(init_only=('servers',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py b/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py
new file mode 100644
index 0000000000..debd1e7f5f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py
@@ -0,0 +1,587 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
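+# Fakes for the compute API console methods, covering the success path
+# and each error case exercised by the tests below.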
+def fake_get_vnc_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_spice_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_rdp_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_serial_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_vnc_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_spice_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_rdp_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_vnc_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_spice_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_rdp_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_vnc_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_spice_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_rdp_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_vnc_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get_spice_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get_rdp_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ return {'uuid': instance_uuid}
+
+
+def fake_get_not_found(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+class ConsolesExtensionTestV21(test.NoDBTestCase):
+ url = '/v2/fake/servers/1/action'
+
+ def _setup_wsgi(self):
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-remote-consoles'))
+
+ def setUp(self):
+ super(ConsolesExtensionTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console)
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console)
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console)
+ self.stubs.Set(compute_api.API, 'get_serial_console',
+ fake_get_serial_console)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self._setup_wsgi()
+
+ def test_get_vnc_console(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'novnc'}})
+
+ def test_get_vnc_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_not_ready)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_vnc_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_invalid_type)
+ body = {'os-getVNCConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_vnc_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_vnc_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_not_found)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_vnc_console_invalid_type(self):
+ body = {'os-getVNCConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_vnc_console_type_unavailable(self):
+ body = {'os-getVNCConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_vnc_console_not_implemented(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fakes.fake_not_implemented)
+
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 501)
+
+ def test_get_spice_console(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'spice-html5'}})
+
+ def test_get_spice_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_not_ready)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_spice_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_invalid_type)
+ body = {'os-getSPICEConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_spice_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_spice_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_not_found)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_spice_console_invalid_type(self):
+ body = {'os-getSPICEConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_spice_console_not_implemented(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fakes.fake_not_implemented)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 501)
+
+ def test_get_spice_console_type_unavailable(self):
+ body = {'os-getSPICEConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_rdp_console(self):
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'rdp-html5'}})
+
+ def test_get_rdp_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_not_ready)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_rdp_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_invalid_type)
+ body = {'os-getRDPConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_rdp_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_rdp_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_not_found)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_rdp_console_invalid_type(self):
+ body = {'os-getRDPConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_rdp_console_type_unavailable(self):
+ body = {'os-getRDPConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_vnc_console_with_undefined_param(self):
+ body = {'os-getVNCConsole': {'type': 'novnc', 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_spice_console_with_undefined_param(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5',
+ 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_rdp_console_with_undefined_param(self):
+ body = {'os-getRDPConsole': {'type': 'rdp-html5', 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+
+class ConsolesExtensionTestV2(ConsolesExtensionTestV21):
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Consoles'])
+ self.app = fakes.wsgi_app(init_only=('servers',))
+
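+    # The v2 API does not reject unknown parameters, so the v2.1
+    # undefined-parameter checks are skipped here.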
+ def test_get_vnc_console_with_undefined_param(self):
+ pass
+
+ def test_get_spice_console_with_undefined_param(self):
+ pass
+
+ def test_get_rdp_console_with_undefined_param(self):
+ pass
+
+ def test_get_serial_console(self):
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({u'console': {u'url': u'http://fake',
+ u'type': u'serial'}},
+ output)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_not_enable(self, get_serial_console):
+ get_serial_console.side_effect = exception.ConsoleTypeUnavailable(
+ console_type="serial")
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_invalid_type(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.ConsoleTypeInvalid(console_type='invalid'))
+
+ body = {'os-getSerialConsole': {'type': 'invalid'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_no_type(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.ConsoleTypeInvalid(console_type=''))
+
+ body = {'os-getSerialConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_no_instance(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.InstanceNotFound(instance_id='xxx'))
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_instance_not_ready(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.InstanceNotReady(instance_id='xxx'))
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_socket_exhausted(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.SocketPortRangeExhaustedException(
+ host='127.0.0.1'))
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 500)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_image_nport_invalid(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.ImageSerialPortNumberInvalid(
+ num_ports='x', property="hw_serial_port_count"))
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(get_serial_console.called)
+
+ @mock.patch.object(compute_api.API, 'get_serial_console')
+ def test_get_serial_console_image_nport_exceed(self, get_serial_console):
+ get_serial_console.side_effect = (
+ exception.ImageSerialPortNumberExceedFlavorValue())
+
+ body = {'os-getSerialConsole': {'type': 'serial'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(get_serial_console.called)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py
new file mode 100644
index 0000000000..eca3aa3953
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py
@@ -0,0 +1,387 @@
+# Copyright 2010-2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from xml.dom import minidom
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_UUID = fakes.FAKE_UUID
+
+FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
+ ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
+
+DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
+
+INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
+
+
+def return_security_group_non_existing(context, project_id, group_name):
+ raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
+ security_group_id=group_name)
+
+
+def return_security_group_get_by_name(context, project_id, group_name):
+ return {'id': 1, 'name': group_name}
+
+
+def return_security_group_get(context, security_group_id, session):
+ return {'id': security_group_id}
+
+
+def return_instance_add_security_group(context, instance_id,
+ security_group_id):
+ pass
+
+
+class CreateserverextTest(test.TestCase):
+ def setUp(self):
+ super(CreateserverextTest, self).setUp()
+
+ self.security_group = None
+ self.injected_files = None
+ self.networks = None
+ self.user_data = None
+
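+        # Stub for compute_api.API.create: records any security_group,
+        # injected_files, requested_networks and user_data keyword arguments
+        # on the test case so individual tests can assert on them, then
+        # returns a single fake instance and no reservation id.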
+ def create(*args, **kwargs):
+ if 'security_group' in kwargs:
+ self.security_group = kwargs['security_group']
+ else:
+ self.security_group = None
+ if 'injected_files' in kwargs:
+ self.injected_files = kwargs['injected_files']
+ else:
+ self.injected_files = None
+
+ if 'requested_networks' in kwargs:
+ self.networks = kwargs['requested_networks']
+ else:
+ self.networks = None
+
+ if 'user_data' in kwargs:
+ self.user_data = kwargs['user_data']
+
+ resv_id = None
+
+ return ([{'id': '1234', 'display_name': 'fakeinstance',
+ 'uuid': FAKE_UUID,
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'created_at': "",
+ 'updated_at': "",
+ 'fixed_ips': [],
+ 'progress': 0}], resv_id)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Createserverext', 'User_data',
+ 'Security_groups', 'Os_networks'])
+
+ def _make_stub_method(self, canned_return):
+ def stub_method(*args, **kwargs):
+ return canned_return
+ return stub_method
+
+ def _create_security_group_request_dict(self, security_groups):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ server['flavorRef'] = 1
+ if security_groups is not None:
+ sg_list = []
+ for name in security_groups:
+ sg_list.append({'name': name})
+ server['security_groups'] = sg_list
+ return {'server': server}
+
+ def _create_networks_request_dict(self, networks):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ server['flavorRef'] = 1
+ if networks is not None:
+ network_list = []
+ for uuid, fixed_ip in networks:
+ network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip})
+ server['networks'] = network_list
+ return {'server': server}
+
+ def _create_user_data_request_dict(self, user_data):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ server['flavorRef'] = 1
+ server['user_data'] = user_data
+ return {'server': server}
+
+ def _get_create_request_json(self, body_dict):
+ req = webob.Request.blank('/v2/fake/os-create-server-ext')
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body_dict)
+ return req
+
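+    # Hand-builds the legacy XML request body from the same dict used for
+    # the JSON requests, emitting the optional metadata, personality and
+    # networks elements only when present.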
+ def _format_xml_request_body(self, body_dict):
+ server = body_dict['server']
+ body_parts = []
+ body_parts.extend([
+ '<?xml version="1.0" encoding="UTF-8"?>',
+ '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.1"',
+ ' name="%s" imageRef="%s" flavorRef="%s">' % (
+ server['name'], server['imageRef'], server['flavorRef'])])
+ if 'metadata' in server:
+ metadata = server['metadata']
+ body_parts.append('<metadata>')
+ for item in metadata.iteritems():
+ body_parts.append('<meta key="%s">%s</meta>' % item)
+ body_parts.append('</metadata>')
+ if 'personality' in server:
+ personalities = server['personality']
+ body_parts.append('<personality>')
+ for file in personalities:
+ item = (file['path'], file['contents'])
+ body_parts.append('<file path="%s">%s</file>' % item)
+ body_parts.append('</personality>')
+ if 'networks' in server:
+ networks = server['networks']
+ body_parts.append('<networks>')
+ for network in networks:
+ item = (network['uuid'], network['fixed_ip'])
+ body_parts.append('<network uuid="%s" fixed_ip="%s"></network>'
+ % item)
+ body_parts.append('</networks>')
+ body_parts.append('</server>')
+ return ''.join(body_parts)
+
+ def _get_create_request_xml(self, body_dict):
+ req = webob.Request.blank('/v2/fake/os-create-server-ext')
+ req.content_type = 'application/xml'
+ req.accept = 'application/xml'
+ req.method = 'POST'
+ req.body = self._format_xml_request_body(body_dict)
+ return req
+
+ def _create_instance_with_networks_json(self, networks):
+ body_dict = self._create_networks_request_dict(networks)
+ request = self._get_create_request_json(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ return request, response, self.networks
+
+ def _create_instance_with_user_data_json(self, networks):
+ body_dict = self._create_user_data_request_dict(networks)
+ request = self._get_create_request_json(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ return request, response, self.user_data
+
+ def _create_instance_with_networks_xml(self, networks):
+ body_dict = self._create_networks_request_dict(networks)
+ request = self._get_create_request_xml(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ return request, response, self.networks
+
+ def test_create_instance_with_no_networks(self):
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(networks=None)
+ self.assertEqual(response.status_int, 202)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_no_networks_xml(self):
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst(networks=None)
+ self.assertEqual(response.status_int, 202)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_one_network(self):
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst([FAKE_NETWORKS[0]])
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())
+
+ def test_create_instance_with_one_network_xml(self):
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst([FAKE_NETWORKS[0]])
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())
+
+ def test_create_instance_with_two_networks(self):
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(FAKE_NETWORKS)
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(FAKE_NETWORKS, networks.as_tuples())
+
+ def test_create_instance_with_two_networks_xml(self):
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst(FAKE_NETWORKS)
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(FAKE_NETWORKS, networks.as_tuples())
+
+ def test_create_instance_with_duplicate_networks(self):
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(DUPLICATE_NETWORKS)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_duplicate_networks_xml(self):
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst(DUPLICATE_NETWORKS)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_no_id(self):
+ body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
+ del body_dict['server']['networks'][0]['uuid']
+ request = self._get_create_request_json(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(self.networks)
+
+ def test_create_instance_with_network_no_id_xml(self):
+ body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
+ request = self._get_create_request_xml(body_dict)
+ uuid = ' uuid="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"'
+ request.body = request.body.replace(uuid, '')
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(self.networks)
+
+ def test_create_instance_with_network_invalid_id(self):
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(INVALID_NETWORKS)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_invalid_id_xml(self):
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst(INVALID_NETWORKS)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_empty_fixed_ip(self):
+ networks = [('1', '')]
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(networks)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_non_string_fixed_ip(self):
+ networks = [('1', 12345)]
+ _create_inst = self._create_instance_with_networks_json
+ request, response, networks = _create_inst(networks)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_empty_fixed_ip_xml(self):
+ networks = [('1', '')]
+ _create_inst = self._create_instance_with_networks_xml
+ request, response, networks = _create_inst(networks)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(networks)
+
+ def test_create_instance_with_network_no_fixed_ip(self):
+ body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
+ del body_dict['server']['networks'][0]['fixed_ip']
+ request = self._get_create_request_json(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
+ self.networks.as_tuples())
+
+ def test_create_instance_with_network_no_fixed_ip_xml(self):
+ body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
+ request = self._get_create_request_xml(body_dict)
+ request.body = request.body.replace(' fixed_ip="10.0.1.12"', '')
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
+ self.networks.as_tuples())
+
+ def test_create_instance_with_userdata(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ user_data_contents = base64.b64encode(user_data_contents)
+ _create_inst = self._create_instance_with_user_data_json
+ request, response, user_data = _create_inst(user_data_contents)
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_none(self):
+ user_data_contents = None
+ _create_inst = self._create_instance_with_user_data_json
+ request, response, user_data = _create_inst(user_data_contents)
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(user_data, user_data_contents)
+
+ def test_create_instance_with_userdata_with_non_b64_content(self):
+ user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
+ _create_inst = self._create_instance_with_user_data_json
+ request, response, user_data = _create_inst(user_data_contents)
+ self.assertEqual(response.status_int, 400)
+ self.assertIsNone(user_data)
+
+ def test_create_instance_with_security_group_json(self):
+ security_groups = ['test', 'test1']
+ self.stubs.Set(db, 'security_group_get_by_name',
+ return_security_group_get_by_name)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_instance_add_security_group)
+ body_dict = self._create_security_group_request_dict(security_groups)
+ request = self._get_create_request_json(body_dict)
+ response = request.get_response(fakes.wsgi_app(
+ init_only=('servers', 'os-create-server-ext')))
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(self.security_group, security_groups)
+
+ def test_get_server_by_id_verify_security_groups_json(self):
+ self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
+ req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
+ req.headers['Content-Type'] = 'application/json'
+ response = req.get_response(fakes.wsgi_app(
+ init_only=('os-create-server-ext', 'servers')))
+ self.assertEqual(response.status_int, 200)
+ res_dict = jsonutils.loads(response.body)
+ expected_security_group = [{"name": "test"}]
+ self.assertEqual(res_dict['server'].get('security_groups'),
+ expected_security_group)
+
+ def test_get_server_by_id_verify_security_groups_xml(self):
+ self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
+ req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
+ req.headers['Accept'] = 'application/xml'
+ response = req.get_response(fakes.wsgi_app(
+ init_only=('os-create-server-ext', 'servers')))
+ self.assertEqual(response.status_int, 200)
+ dom = minidom.parseString(response.body)
+ server = dom.childNodes[0]
+ sec_groups = server.getElementsByTagName('security_groups')[0]
+ sec_group = sec_groups.getElementsByTagName('security_group')[0]
+ self.assertEqual('test', sec_group.getAttribute("name"))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py b/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py
new file mode 100644
index 0000000000..0dfd0e5339
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py
@@ -0,0 +1,147 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import deferred_delete
+from nova.api.openstack.compute.plugins.v3 import deferred_delete as dd_v21
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class FakeRequest(object):
+ def __init__(self, context):
+ self.environ = {'nova.context': context}
+
+
+class DeferredDeleteExtensionTestV21(test.NoDBTestCase):
+ ext_ver = dd_v21.DeferredDeleteController
+
+ def setUp(self):
+ super(DeferredDeleteExtensionTestV21, self).setUp()
+ self.fake_input_dict = {}
+ self.fake_uuid = 'fake_uuid'
+ self.fake_context = context.RequestContext('fake', 'fake')
+ self.fake_req = FakeRequest(self.fake_context)
+ self.extension = self.ext_ver()
+
+ def test_force_delete(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ self.mox.StubOutWithMock(compute_api.API, 'force_delete')
+
+ fake_instance = 'fake_instance'
+
+ compute_api.API.get(self.fake_context, self.fake_uuid,
+ expected_attrs=None,
+ want_objects=True).AndReturn(fake_instance)
+ compute_api.API.force_delete(self.fake_context, fake_instance)
+
+ self.mox.ReplayAll()
+ res = self.extension._force_delete(self.fake_req, self.fake_uuid,
+ self.fake_input_dict)
+        # NOTE: In v2.1 the HTTP status code is exposed as the wsgi_code
+        #       attribute of the API method rather than as status_int on
+        #       the response object.
+ if isinstance(self.extension, dd_v21.DeferredDeleteController):
+ status_int = self.extension._force_delete.wsgi_code
+ else:
+ status_int = res.status_int
+ self.assertEqual(202, status_int)
+
+ def test_force_delete_instance_not_found(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+
+ compute_api.API.get(self.fake_context, self.fake_uuid,
+ expected_attrs=None,
+ want_objects=True).AndRaise(
+ exception.InstanceNotFound(instance_id='instance-0000'))
+
+ self.mox.ReplayAll()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.extension._force_delete,
+ self.fake_req,
+ self.fake_uuid,
+ self.fake_input_dict)
+
+ @mock.patch.object(compute_api.API, 'get')
+ @mock.patch.object(compute_api.API, 'force_delete',
+ side_effect=exception.InstanceIsLocked(
+ instance_uuid='fake_uuid'))
+ def test_force_delete_instance_locked(self, mock_force_delete, mock_get):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/fake_uuid/action')
+ ex = self.assertRaises(webob.exc.HTTPConflict,
+ self.extension._force_delete,
+ req, 'fake_uuid', '')
+ self.assertIn('Instance fake_uuid is locked', ex.explanation)
+
+ def test_restore(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ self.mox.StubOutWithMock(compute_api.API, 'restore')
+
+ fake_instance = 'fake_instance'
+
+ compute_api.API.get(self.fake_context, self.fake_uuid,
+ expected_attrs=None,
+ want_objects=True).AndReturn(fake_instance)
+ compute_api.API.restore(self.fake_context, fake_instance)
+
+ self.mox.ReplayAll()
+ res = self.extension._restore(self.fake_req, self.fake_uuid,
+ self.fake_input_dict)
+        # NOTE: In v2.1 the HTTP status code is exposed as the wsgi_code
+        #       attribute of the API method rather than as status_int on
+        #       the response object.
+ if isinstance(self.extension, dd_v21.DeferredDeleteController):
+ status_int = self.extension._restore.wsgi_code
+ else:
+ status_int = res.status_int
+ self.assertEqual(202, status_int)
+
+ def test_restore_instance_not_found(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+
+ compute_api.API.get(self.fake_context, self.fake_uuid,
+ expected_attrs=None, want_objects=True).AndRaise(
+ exception.InstanceNotFound(instance_id='instance-0000'))
+
+ self.mox.ReplayAll()
+ self.assertRaises(webob.exc.HTTPNotFound, self.extension._restore,
+ self.fake_req, self.fake_uuid,
+ self.fake_input_dict)
+
+ def test_restore_raises_conflict_on_invalid_state(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ self.mox.StubOutWithMock(compute_api.API, 'restore')
+
+ fake_instance = 'fake_instance'
+ exc = exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ compute_api.API.get(self.fake_context, self.fake_uuid,
+ expected_attrs=None,
+ want_objects=True).AndReturn(fake_instance)
+ compute_api.API.restore(self.fake_context, fake_instance).AndRaise(
+ exc)
+
+ self.mox.ReplayAll()
+ self.assertRaises(webob.exc.HTTPConflict, self.extension._restore,
+ self.fake_req, self.fake_uuid, self.fake_input_dict)
+
+
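+# Re-runs every case above against the legacy v2 contrib controller.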
+class DeferredDeleteExtensionTestV2(DeferredDeleteExtensionTestV21):
+ ext_ver = deferred_delete.DeferredDeleteController
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py
new file mode 100644
index 0000000000..b9a514a451
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py
@@ -0,0 +1,449 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.compute import api as compute_api
+from nova import db
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+
+
+MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
+AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
+
+stub_instance = fakes.stub_instance
+
+API_DISK_CONFIG = 'OS-DCF:diskConfig'
+
+
+def instance_addresses(context, instance_id):
+ return None
+
+
+class DiskConfigTestCaseV21(test.TestCase):
+
+ def setUp(self):
+ super(DiskConfigTestCaseV21, self).setUp()
+ self._set_up_app()
+ self._setup_fake_image_service()
+
+ fakes.stub_out_nw_api(self.stubs)
+
+ FAKE_INSTANCES = [
+ fakes.stub_instance(1,
+ uuid=MANUAL_INSTANCE_UUID,
+ auto_disk_config=False),
+ fakes.stub_instance(2,
+ uuid=AUTO_INSTANCE_UUID,
+ auto_disk_config=True)
+ ]
+
+ def fake_instance_get(context, id_):
+ for instance in FAKE_INSTANCES:
+ if id_ == instance['id']:
+ return instance
+
+ self.stubs.Set(db, 'instance_get', fake_instance_get)
+
+ def fake_instance_get_by_uuid(context, uuid,
+ columns_to_join=None, use_slave=False):
+ for instance in FAKE_INSTANCES:
+ if uuid == instance['uuid']:
+ return instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+
+ def fake_instance_get_all(context, *args, **kwargs):
+ return FAKE_INSTANCES
+
+ self.stubs.Set(db, 'instance_get_all', fake_instance_get_all)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_instance_get_all)
+
+ self.stubs.Set(objects.Instance, 'save',
+ lambda *args, **kwargs: None)
+
+ def fake_rebuild(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
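+        # Stub for db.instance_create: builds a fake DB instance carrying
+        # the requested auto_disk_config value and wires the update/get
+        # stubs so the newly "created" server can be looked up by later
+        # calls in the same request.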
+ def fake_instance_create(context, inst_, session=None):
+ inst = fake_instance.fake_db_instance(**{
+ 'id': 1,
+ 'uuid': AUTO_INSTANCE_UUID,
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'progress': 0,
+ 'name': 'instance-1', # this is a property
+ 'task_state': '',
+ 'vm_state': '',
+ 'auto_disk_config': inst_['auto_disk_config'],
+ 'security_groups': inst_['security_groups'],
+ })
+
+ def fake_instance_get_for_create(context, id_, *args, **kwargs):
+ return (inst, inst)
+
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ fake_instance_get_for_create)
+
+ def fake_instance_get_all_for_create(context, *args, **kwargs):
+ return [inst]
+ self.stubs.Set(db, 'instance_get_all',
+ fake_instance_get_all_for_create)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_instance_get_all_for_create)
+
+ def fake_instance_add_security_group(context, instance_id,
+ security_group_id):
+ pass
+
+ self.stubs.Set(db,
+ 'instance_add_security_group',
+ fake_instance_add_security_group)
+
+ return inst
+
+ self.stubs.Set(db, 'instance_create', fake_instance_create)
+
+ def _set_up_app(self):
+ self.app = compute.APIRouterV21(init_only=('servers', 'images',
+ 'os-disk-config'))
+
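+    # The doubled braces survive the .format() call made by the test and
+    # end up as literal JSON braces in the expected error body.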
+ def _get_expected_msg_for_invalid_disk_config(self):
+ return ('{{"badRequest": {{"message": "Invalid input for'
+ ' field/attribute {0}. Value: {1}. u\'{1}\' is'
+ ' not one of [\'AUTO\', \'MANUAL\']", "code": 400}}}}')
+
+ def _setup_fake_image_service(self):
+ self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
+ self.stubs)
+ timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
+ image = {'id': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'name': 'fakeimage7',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'size': '74185822',
+ 'properties': {'auto_disk_config': 'Disabled'}}
+ self.image_service.create(None, image)
+
+ def tearDown(self):
+ super(DiskConfigTestCaseV21, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def assertDiskConfig(self, dict_, value):
+ self.assertIn(API_DISK_CONFIG, dict_)
+ self.assertEqual(dict_[API_DISK_CONFIG], value)
+
+ def test_show_server(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % AUTO_INSTANCE_UUID)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_detail_servers(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail')
+ res = req.get_response(self.app)
+ server_dicts = jsonutils.loads(res.body)['servers']
+
+ expectations = ['MANUAL', 'AUTO']
+ for server_dict, expected in zip(server_dicts, expectations):
+ self.assertDiskConfig(server_dict, expected)
+
+ def test_show_image(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379')
+ res = req.get_response(self.app)
+ image_dict = jsonutils.loads(res.body)['image']
+ self.assertDiskConfig(image_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/images/70a599e0-31e7-49b7-b260-868f441e862b')
+ res = req.get_response(self.app)
+ image_dict = jsonutils.loads(res.body)['image']
+ self.assertDiskConfig(image_dict, 'AUTO')
+
+ def test_detail_image(self):
+ req = fakes.HTTPRequest.blank('/fake/images/detail')
+ res = req.get_response(self.app)
+ image_dicts = jsonutils.loads(res.body)['images']
+
+ expectations = ['MANUAL', 'AUTO']
+ for image_dict, expected in zip(image_dicts, expectations):
+            # NOTE(sirp): image fixtures 6 and 7 are set up for
+ # auto_disk_config testing
+ if image_dict['id'] in (6, 7):
+ self.assertDiskConfig(image_dict, expected)
+
+ def test_create_server_override_auto(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_create_server_override_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'MANUAL'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def test_create_server_detect_from_image(self):
+ """If user doesn't pass in diskConfig for server, use image metadata
+ to specify AUTO or MANUAL.
+ """
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_create_server_detect_from_image_disabled_goes_to_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def test_create_server_errors_when_disabled_and_auto(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_server_when_disabled_and_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'MANUAL'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def _test_update_server_disk_config(self, uuid, disk_config):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % uuid)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'server': {API_DISK_CONFIG: disk_config}}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, disk_config)
+
+ def test_update_server_override_auto(self):
+ self._test_update_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
+
+ def test_update_server_override_manual(self):
+ self._test_update_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
+
+ def test_update_server_invalid_disk_config(self):
+ # Return BadRequest if user passes an invalid diskConfig value.
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'server': {API_DISK_CONFIG: 'server_test'}}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ expected_msg = self._get_expected_msg_for_invalid_disk_config()
+ self.assertEqual(expected_msg.format(API_DISK_CONFIG, 'server_test'),
+ res.body)
+
+ def _test_rebuild_server_disk_config(self, uuid, disk_config):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % uuid)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"rebuild": {
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ API_DISK_CONFIG: disk_config
+ }}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, disk_config)
+
+ def test_rebuild_server_override_auto(self):
+ self._test_rebuild_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
+
+ def test_rebuild_server_override_manual(self):
+ self._test_rebuild_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
+
+ def test_create_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_rebuild_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"rebuild": {
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ def rebuild(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_resize_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"resize": {
+ "flavorRef": "3",
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ def resize(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+
+ self.stubs.Set(compute_api.API, 'resize', resize)
+
+ req.body = jsonutils.dumps(body)
+ req.get_response(self.app)
+
+
+class DiskConfigTestCaseV2(DiskConfigTestCaseV21):
+ def _set_up_app(self):
+ self.flags(verbose=True,
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Disk_config'])
+
+ self.app = compute.APIRouter(init_only=('servers', 'images'))
+
+ def _get_expected_msg_for_invalid_disk_config(self):
+ return ('{{"badRequest": {{"message": "{0} must be either'
+ ' \'MANUAL\' or \'AUTO\'.", "code": 400}}}}')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py
new file mode 100644
index 0000000000..3f5b662db5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py
@@ -0,0 +1,268 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova.compute import vm_states
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def fake_compute_api(*args, **kwargs):
+ return True
+
+
+def fake_compute_api_get(self, context, instance_id, want_objects=False,
+ **kwargs):
+ # BAD_UUID is something that does not exist
+ if instance_id == 'BAD_UUID':
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ else:
+ return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
+ task_state=None, host='host1',
+ vm_state=vm_states.ACTIVE)
+
+
+def fake_service_get_by_compute_host(self, context, host):
+ if host == 'bad-host':
+ raise exception.ComputeHostNotFound(host=host)
+ else:
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+
+class EvacuateTestV21(test.NoDBTestCase):
+
+ _methods = ('resize', 'evacuate')
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(EvacuateTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.UUID = uuid.uuid4()
+ for _method in self._methods:
+ self.stubs.Set(compute_api.API, _method, fake_compute_api)
+
+ def _fake_wsgi_app(self, ctxt):
+ return fakes.wsgi_app_v21(fake_auth_context=ctxt)
+
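+    # Wraps the given payload in an 'evacuate' action body and POSTs it to
+    # the server action URL under an admin (or optionally non-admin)
+    # context, returning the raw WSGI response.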
+ def _gen_resource_with_app(self, json_load, is_admin=True, uuid=None):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = is_admin
+ app = self._fake_wsgi_app(ctxt)
+ req = webob.Request.blank('%s/servers/%s/action' % (self.fake_url,
+ uuid or self.UUID))
+ req.method = 'POST'
+ base_json_load = {'evacuate': json_load}
+ req.body = jsonutils.dumps(base_json_load)
+ req.content_type = 'application/json'
+
+ return req.get_response(app)
+
+ def _fake_update(self, inst, context, instance, task_state,
+ expected_task_state):
+ return None
+
+ def test_evacuate_with_valid_instance(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+
+ self.assertEqual(res.status_int, 200)
+
+ def test_evacuate_with_invalid_instance(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'},
+ uuid='BAD_UUID')
+
+ self.assertEqual(res.status_int, 404)
+
+ def test_evacuate_with_active_service(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.ComputeServiceInUse("Service still in use")
+
+ self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_no_target(self):
+ res = self._gen_resource_with_app({'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(200, res.status_int)
+
+ def test_evacuate_instance_without_on_shared_storage(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_invalid_characters_host(self):
+ host = 'abc!#'
+ res = self._gen_resource_with_app({'host': host,
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_too_long_host(self):
+ host = 'a' * 256
+ res = self._gen_resource_with_app({'host': host,
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_invalid_on_shared_storage(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'foo',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_bad_target(self):
+ res = self._gen_resource_with_app({'host': 'bad-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 404)
+
+ def test_evacuate_instance_with_target(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_shared_and_pass(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_not_shared_pass_generated(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
+
+ def test_evacuate_shared(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True'})
+ self.assertEqual(res.status_int, 200)
+
+ def test_not_admin(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True'},
+ is_admin=False)
+ self.assertEqual(res.status_int, 403)
+
+ def test_evacuate_to_same_host(self):
+ res = self._gen_resource_with_app({'host': 'host1',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_empty_host(self):
+ res = self._gen_resource_with_app({'host': '',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_underscore_in_hostname(self):
+ # NOTE: The hostname grammar in RFC952 does not allow for
+ # underscores in hostnames. However, we should test that it
+ # is supported because it sometimes occurs in real systems.
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'underscore_hostname',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(200, res.status_int)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_disable_password_return(self):
+ self._test_evacuate_enable_instance_password_conf(False)
+
+ def test_evacuate_enable_password_return(self):
+ self._test_evacuate_enable_instance_password_conf(True)
+
+ def _test_evacuate_enable_instance_password_conf(self, enable_pass):
+ self.flags(enable_instance_password=enable_pass)
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+
+ res = self._gen_resource_with_app({'host': 'my_host',
+ 'onSharedStorage': 'False'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ if enable_pass:
+ self.assertIn('adminPass', resp_json)
+ else:
+ self.assertIsNone(resp_json.get('adminPass'))
+
+
+class EvacuateTestV2(EvacuateTestV21):
+
+ def setUp(self):
+ super(EvacuateTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Evacuate'])
+
+ def _fake_wsgi_app(self, ctxt):
+ return fakes.wsgi_app(fake_auth_context=ctxt)
+
+ def test_evacuate_instance_with_no_target(self):
+ res = self._gen_resource_with_app({'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_too_long_host(self):
+ pass
+
+ def test_evacuate_instance_with_invalid_characters_host(self):
+ pass
+
+ def test_evacuate_instance_with_invalid_on_shared_storage(self):
+ pass
+
+ def test_evacuate_disable_password_return(self):
+ pass
+
+ def test_evacuate_enable_password_return(self):
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
new file mode 100644
index 0000000000..a3e6dd4a78
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -0,0 +1,184 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_availability_zone
+from nova import availability_zones
+from nova import compute
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
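+# The fakes below return instances with a host set, with an empty host, or
+# without an explicit availability zone, so both the instance-level and
+# host-level availability zone lookup paths get exercised.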
+def fake_compute_get_az(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+ vm_state=vm_states.ACTIVE,
+ availability_zone='fakeaz')
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_empty(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="",
+ vm_state=vm_states.ACTIVE,
+ availability_zone='fakeaz')
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+ vm_state=vm_states.ACTIVE)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host",
+ vm_state=vm_states.ACTIVE)
+ inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host",
+ vm_state=vm_states.ACTIVE)
+ db_list = [inst1, inst2]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+def fake_get_host_availability_zone(context, host):
+ return host
+
+
+def fake_get_no_host_availability_zone(context, host):
+ return None
+
+
+class ExtendedAvailabilityZoneTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-AZ:'
+ base_url = '/v2/fake/servers/'
+
+ def setUp(self):
+ super(ExtendedAvailabilityZoneTestV21, self).setUp()
+ availability_zones.reset_cache()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_host_availability_zone)
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(init_only=None))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertAvailabilityZone(self, server, az):
+ self.assertEqual(server.get('%savailability_zone' % self.prefix),
+ az)
+
+ def test_show_no_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_az)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
+
+ def test_show_empty_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_empty)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
+
+ def test_show(self):
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'get-host')
+
+ def test_detail(self):
+ url = self.base_url + 'detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertAvailabilityZone(server, 'all-host')
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self.base_url + '70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ExtendedAvailabilityZoneTestV2(ExtendedAvailabilityZoneTestV21):
+
+ def setUp(self):
+ super(ExtendedAvailabilityZoneTestV2, self).setUp()
+
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_availability_zone'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedAvailabilityZoneXmlTestV2(ExtendedAvailabilityZoneTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_availability_zone.\
+ Extended_availability_zone.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
new file mode 100644
index 0000000000..1aaee6837a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
@@ -0,0 +1,114 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import vm_states
+from nova import context
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ExtendedEvacuateFindHostTest, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_evacuate_find_host',
+ 'Evacuate'])
+ self.UUID = uuid.uuid4()
+
+ def _get_admin_context(self, user_id='fake', project_id='fake'):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = user_id
+ ctxt.project_id = project_id
+ return ctxt
+
+ def _fake_compute_api(*args, **kwargs):
+ return True
+
+ def _fake_compute_api_get(self, context, instance_id, **kwargs):
+        instance = fake_instance.fake_db_instance(id=1, uuid=instance_id,
+ task_state=None,
+ host='host1',
+ vm_state=vm_states.ACTIVE)
+ instance = instance_obj.Instance._from_db_object(context,
+ instance_obj.Instance(),
+ instance)
+ return instance
+
+ def _fake_service_get_by_compute_host(self, context, host):
+ return {'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ @mock.patch('nova.compute.api.API.evacuate')
+ def test_evacuate_instance_with_no_target(self, evacuate_mock,
+ api_get_mock,
+ service_get_mock):
+        service_get_mock.side_effect = self._fake_service_get_by_compute_host
+        api_get_mock.side_effect = self._fake_compute_api_get
+        evacuate_mock.side_effect = self._fake_compute_api
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(200, res.status_int)
+ evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
+ mock.ANY, mock.ANY)
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
+ service_get_mock):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Evacuate'])
+        service_get_mock.side_effect = self._fake_service_get_by_compute_host
+        api_get_mock.side_effect = self._fake_compute_api_get
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py
new file mode 100644
index 0000000000..df5e0d787a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py
@@ -0,0 +1,101 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_compute_node_get(context, compute_id):
+ for hyper in test_hypervisors.TEST_HYPERS:
+ if hyper['id'] == compute_id:
+ return hyper
+ raise exception.ComputeHostNotFound(host=compute_id)
+
+
+def fake_compute_node_get_all(context):
+ return test_hypervisors.TEST_HYPERS
+
+
+class ExtendedHypervisorsTestV21(test.NoDBTestCase):
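+    # Expected detail views: the shared TEST_HYPERS fixture with the raw
+    # service_id dropped and the state/status/service fields the extension
+    # adds.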
+ DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
+ del DETAIL_HYPERS_DICTS[0]['service_id']
+ del DETAIL_HYPERS_DICTS[1]['service_id']
+ DETAIL_HYPERS_DICTS[0].update({'state': 'up',
+ 'status': 'enabled',
+ 'service': dict(id=1, host='compute1',
+ disabled_reason=None)})
+ DETAIL_HYPERS_DICTS[1].update({'state': 'up',
+ 'status': 'enabled',
+ 'service': dict(id=2, host='compute2',
+ disabled_reason=None)})
+
+ def _set_up_controller(self):
+ self.controller = hypervisors_v21.HypervisorsController()
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
+ use_admin_context=True)
+
+ def setUp(self):
+ super(ExtendedHypervisorsTestV21, self).setUp()
+ self._set_up_controller()
+
+ self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
+ self.stubs.Set(db, 'compute_node_get',
+ fake_compute_node_get)
+
+ def test_view_hypervisor_detail_noservers(self):
+ result = self.controller._view_hypervisor(
+ test_hypervisors.TEST_HYPERS[0], True)
+
+ self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
+
+ def test_detail(self):
+ req = self._get_request()
+ result = self.controller.detail(req)
+
+ self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
+
+ def test_show_withid(self):
+ req = self._get_request()
+ result = self.controller.show(req, '1')
+
+ self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
+
+
+class ExtendedHypervisorsTestV2(ExtendedHypervisorsTestV21):
+ DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
+ del DETAIL_HYPERS_DICTS[0]['service_id']
+ del DETAIL_HYPERS_DICTS[1]['service_id']
+ DETAIL_HYPERS_DICTS[0].update({'service': dict(id=1, host='compute1')})
+ DETAIL_HYPERS_DICTS[1].update({'service': dict(id=2, host='compute2')})
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.ext_mgr.extensions['os-extended-hypervisors'] = True
+ self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py
new file mode 100644
index 0000000000..770814116c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py
@@ -0,0 +1,189 @@
+# Copyright 2013 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_ips
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+NW_CACHE = [
+ {
+ 'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [
+ {
+ 'cidr': '192.168.1.0/24',
+ 'ips': [
+ {
+ 'address': '192.168.1.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.1', 'type': 'floating'},
+ ],
+ },
+ ],
+ },
+ ]
+ }
+ },
+ {
+ 'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {
+ 'bridge': 'br1',
+ 'id': 2,
+ 'label': 'public',
+ 'subnets': [
+ {
+ 'cidr': '10.0.0.0/24',
+ 'ips': [
+ {
+ 'address': '10.0.0.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.2', 'type': 'floating'},
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ }
+]
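+# Flatten NW_CACHE into the list of fixed and floating IPs the extension
+# is expected to report (the floating_ips nesting is dropped).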
+ALL_IPS = []
+for cache in NW_CACHE:
+ for subnet in cache['network']['subnets']:
+ for fixed in subnet['ips']:
+ sanitized = dict(fixed)
+ sanitized.pop('floating_ips')
+ ALL_IPS.append(sanitized)
+ for floating in fixed['floating_ips']:
+ ALL_IPS.append(floating)
+ALL_IPS.sort()
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
+ return fake_instance.fake_instance_obj(args[1],
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [
+ fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
+ fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
+ ]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ExtendedIpsTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-IPS:'
+
+ def setUp(self):
+ super(ExtendedIpsTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_ips(self, server):
+ for network in server['addresses'].itervalues():
+ for ip in network:
+ yield ip
+
+ def assertServerStates(self, server):
+ results = []
+ for ip in self._get_ips(server):
+ results.append({'address': ip.get('addr'),
+ 'type': ip.get('%stype' % self.prefix)})
+
+ self.assertEqual(ALL_IPS, sorted(results))
+
+ def test_show(self):
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerStates(self._get_server(res.body))
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerStates(server)
+
+
+class ExtendedIpsTestV2(ExtendedIpsTestV21):
+
+ def setUp(self):
+ super(ExtendedIpsTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_ips'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedIpsXmlTest(ExtendedIpsTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_ips.Extended_ips.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_ips(self, server):
+ for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
+ for ip in network:
+ yield ip
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py
new file mode 100644
index 0000000000..c3e94600aa
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py
@@ -0,0 +1,196 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_ips_mac
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+NW_CACHE = [
+ {
+ 'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [
+ {
+ 'cidr': '192.168.1.0/24',
+ 'ips': [
+ {
+ 'address': '192.168.1.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.1', 'type': 'floating'},
+ ],
+ },
+ ],
+ },
+ ]
+ }
+ },
+ {
+ 'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {
+ 'bridge': 'br1',
+ 'id': 2,
+ 'label': 'public',
+ 'subnets': [
+ {
+ 'cidr': '10.0.0.0/24',
+ 'ips': [
+ {
+ 'address': '10.0.0.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.2', 'type': 'floating'},
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ }
+]
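+# Flatten NW_CACHE into address/mac_address pairs: every fixed and
+# floating IP is tagged with the MAC of the interface it belongs to.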
+ALL_IPS = []
+for cache in NW_CACHE:
+ for subnet in cache['network']['subnets']:
+ for fixed in subnet['ips']:
+ sanitized = dict(fixed)
+ sanitized['mac_address'] = cache['address']
+ sanitized.pop('floating_ips')
+ sanitized.pop('type')
+ ALL_IPS.append(sanitized)
+ for floating in fixed['floating_ips']:
+ sanitized = dict(floating)
+ sanitized['mac_address'] = cache['address']
+ sanitized.pop('type')
+ ALL_IPS.append(sanitized)
+ALL_IPS.sort()
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
+ return fake_instance.fake_instance_obj(args[1],
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [
+ fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
+ fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
+ ]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ExtendedIpsMacTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
+
+ def setUp(self):
+ super(ExtendedIpsMacTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_ips(self, server):
+ for network in server['addresses'].itervalues():
+ for ip in network:
+ yield ip
+
+ def assertServerStates(self, server):
+ results = []
+ for ip in self._get_ips(server):
+ results.append({'address': ip.get('addr'),
+ 'mac_address': ip.get('%smac_addr' % self.prefix)})
+
+ self.assertEqual(ALL_IPS, sorted(results))
+
+ def test_show(self):
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerStates(self._get_server(res.body))
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for _i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerStates(server)
+
+
+class ExtendedIpsMacTestV2(ExtendedIpsMacTestV21):
+ content_type = 'application/json'
+ prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
+
+ def setUp(self):
+ super(ExtendedIpsMacTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_ips_mac'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedIpsMacXmlTest(ExtendedIpsMacTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_ips_mac.Extended_ips_mac.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_ips(self, server):
+ for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
+ for ip in network:
+ yield ip
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py
new file mode 100644
index 0000000000..42a8382595
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py
@@ -0,0 +1,62 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.contrib import rescue
+from nova.api.openstack import extensions
+from nova import compute
+import nova.context as context
+from nova import test
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+class FakeRequest(object):
+ def __init__(self, context):
+ self.environ = {"nova.context": context}
+
+
+class ExtendedRescueWithImageTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ExtendedRescueWithImageTest, self).setUp()
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-rescue-with-image': 'fake'}
+ self.controller = rescue.RescueController(ext_mgr)
+
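+    # Both tests below go through this helper: the controller must pass
+    # rescue_image_ref straight through to compute_api.rescue(), or None
+    # when the request body does not specify one.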
+ @mock.patch.object(common, 'get_instance',
+ return_value="instance")
+ @mock.patch.object(compute.api.API, "rescue")
+ def _make_rescue_request_with_image_ref(self, body, mock_rescue,
+ mock_get_instance):
+ instance = "instance"
+ self.controller._get_instance = mock.Mock(return_value=instance)
+ fake_context = context.RequestContext('fake', 'fake')
+ req = FakeRequest(fake_context)
+
+ self.controller._rescue(req, "id", body)
+ rescue_image_ref = body["rescue"].get("rescue_image_ref")
+ mock_rescue.assert_called_with(mock.ANY, mock.ANY,
+ rescue_password=mock.ANY, rescue_image_ref=rescue_image_ref)
+
+ def test_rescue_with_image_specified(self):
+ body = dict(rescue={"rescue_image_ref": "image-ref"})
+ self._make_rescue_request_with_image_ref(body)
+
+ def test_rescue_without_image_specified(self):
+ body = dict(rescue={})
+ self._make_rescue_request_with_image_ref(body)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py
new file mode 100644
index 0000000000..f944289efe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py
@@ -0,0 +1,148 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_server_attributes
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+from oslo.config import cfg
+
+
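+# Expected OS-EXT-SRV-ATTR:instance_name values are rendered from the
+# configured instance_name_template.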
+NAME_FMT = cfg.CONF.instance_name_template
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return objects.Instance._from_db_object(
+ args[1], objects.Instance(),
+ fakes.stub_instance(1, uuid=UUID3, host="host-fake",
+ node="node-fake"), fields)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [
+ fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"),
+ fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2")
+ ]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ExtendedServerAttributesTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-SRV-ATTR:'
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(ExtendedServerAttributesTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(
+ fakes.wsgi_app_v21(init_only=('servers',
+ 'os-extended-server-attributes')))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertServerAttributes(self, server, host, node, instance_name):
+ self.assertEqual(server.get('%shost' % self.prefix), host)
+ self.assertEqual(server.get('%sinstance_name' % self.prefix),
+ instance_name)
+ self.assertEqual(server.get('%shypervisor_hostname' % self.prefix),
+ node)
+
+ def test_show(self):
+ url = self.fake_url + '/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerAttributes(self._get_server(res.body),
+ host='host-fake',
+ node='node-fake',
+ instance_name=NAME_FMT % 1)
+
+ def test_detail(self):
+ url = self.fake_url + '/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerAttributes(server,
+ host='host-%s' % (i + 1),
+ node='node-%s' % (i + 1),
+ instance_name=NAME_FMT % (i + 1))
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ExtendedServerAttributesTestV2(ExtendedServerAttributesTestV21):
+
+ def setUp(self):
+ super(ExtendedServerAttributesTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_server_attributes'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTestV2):
+ content_type = 'application/xml'
+ ext = extended_server_attributes
+ prefix = '{%s}' % ext.Extended_server_attributes.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py
new file mode 100644
index 0000000000..b47562f7a7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py
@@ -0,0 +1,148 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_status
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
+ vm_state="slightly crunchy", power_state=1)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [
+ fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
+ vm_state="vm-1", power_state=1),
+ fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
+ vm_state="vm-2", power_state=2),
+ ]
+
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ExtendedStatusTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-STS:'
+ fake_url = '/v2/fake'
+
+ def _set_flags(self):
+ pass
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(
+ init_only=('servers',
+ 'os-extended-status')))
+ return res
+
+ def setUp(self):
+ super(ExtendedStatusTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self._set_flags()
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertServerStates(self, server, vm_state, power_state, task_state):
+ self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
+ self.assertEqual(int(server.get('%spower_state' % self.prefix)),
+ power_state)
+ self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
+
+ def test_show(self):
+ url = self.fake_url + '/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerStates(self._get_server(res.body),
+ vm_state='slightly crunchy',
+ power_state=1,
+ task_state='kayaking')
+
+ def test_detail(self):
+ url = self.fake_url + '/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerStates(server,
+ vm_state='vm-%s' % (i + 1),
+ power_state=(i + 1),
+ task_state='task-%s' % (i + 1))
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ExtendedStatusTestV2(ExtendedStatusTestV21):
+
+ def _set_flags(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_status'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedStatusXmlTest(ExtendedStatusTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_status.Extended_status.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
new file mode 100644
index 0000000000..851848d7a5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
@@ -0,0 +1,123 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_virtual_interfaces_net
+from nova.api.openstack import wsgi
+from nova import compute
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
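+# Each fake VIF maps to its own network UUID; the extension is expected
+# to report exactly the net_ids listed in EXPECTED_NET_UUIDS.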
+FAKE_VIFS = [{'uuid': '00000000-0000-0000-0000-00000000000000000',
+ 'address': '00-00-00-00-00-00',
+ 'net_uuid': '00000000-0000-0000-0000-00000000000000001'},
+ {'uuid': '11111111-1111-1111-1111-11111111111111111',
+ 'address': '11-11-11-11-11-11',
+ 'net_uuid': '11111111-1111-1111-1111-11111111111111112'}]
+
+EXPECTED_NET_UUIDS = ['00000000-0000-0000-0000-00000000000000001',
+ '11111111-1111-1111-1111-11111111111111112']
+
+
+def compute_api_get(self, context, instance_id, expected_attrs=None,
+ want_objects=False):
+ return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
+
+
+def get_vifs_by_instance(self, context, instance_id):
+ return FAKE_VIFS
+
+
+def get_vif_by_mac_address(self, context, mac_address):
+ if mac_address == "00-00-00-00-00-00":
+ return {'net_uuid': '00000000-0000-0000-0000-00000000000000001'}
+ else:
+ return {'net_uuid': '11111111-1111-1111-1111-11111111111111112'}
+
+
+class ExtendedServerVIFNetTest(test.NoDBTestCase):
+ content_type = 'application/json'
+ prefix = "%s:" % extended_virtual_interfaces_net. \
+ Extended_virtual_interfaces_net.alias
+
+ def setUp(self):
+ super(ExtendedServerVIFNetTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_vifs_by_instance",
+ get_vifs_by_instance)
+ self.stubs.Set(network.api.API, "get_vif_by_mac_address",
+ get_vif_by_mac_address)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Virtual_interfaces',
+ 'Extended_virtual_interfaces_net'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=(
+ 'os-virtual-interfaces', 'OS-EXT-VIF-NET')))
+ return res
+
+ def _get_vifs(self, body):
+ return jsonutils.loads(body).get('virtual_interfaces')
+
+ def _get_net_id(self, vifs):
+ for vif in vifs:
+ yield vif['%snet_id' % self.prefix]
+
+ def assertVIFs(self, vifs):
+ result = []
+ for net_id in self._get_net_id(vifs):
+ result.append(net_id)
+        result.sort()
+
+ for i, net_uuid in enumerate(result):
+ self.assertEqual(net_uuid, EXPECTED_NET_UUIDS[i])
+
+ def test_get_extend_virtual_interfaces_list(self):
+ res = self._make_request('/v2/fake/servers/abcd/os-virtual-interfaces')
+
+ self.assertEqual(res.status_int, 200)
+ self.assertVIFs(self._get_vifs(res.body))
+
+
+class ExtendedServerVIFNetSerializerTest(ExtendedServerVIFNetTest):
+ content_type = 'application/xml'
+ prefix = "{%s}" % extended_virtual_interfaces_net. \
+ Extended_virtual_interfaces_net.namespace
+
+ def setUp(self):
+ super(ExtendedServerVIFNetSerializerTest, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.serializer = extended_virtual_interfaces_net. \
+ ExtendedVirtualInterfaceNetTemplate()
+
+ def _get_vifs(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_net_id(self, vifs):
+ for vif in vifs:
+ yield vif.attrib['%snet_id' % self.prefix]
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py
new file mode 100644
index 0000000000..d441013e8d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py
@@ -0,0 +1,124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_volumes
+from nova import compute
+from nova import db
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID1)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
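+# Every instance is reported with the same two volume BDMs, so both show
+# and detail expect volumes UUID1 and UUID2 to be listed as attached.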
+def fake_bdms_get_all_by_instance(*args, **kwargs):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID1, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID2, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 2})]
+
+
+class ExtendedVolumesTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'os-extended-volumes:'
+
+ def setUp(self):
+ super(ExtendedVolumesTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_volumes'])
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def test_show(self):
+ url = '/v2/fake/servers/%s' % UUID1
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ server = self._get_server(res.body)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ elif self.content_type == 'application/xml':
+ actual = [dict(elem.items()) for elem in
+ server.findall('%svolume_attached' % self.prefix)]
+ self.assertEqual(exp_volumes, actual)
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ for i, server in enumerate(self._get_servers(res.body)):
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ elif self.content_type == 'application/xml':
+ actual = [dict(elem.items()) for elem in
+ server.findall('%svolume_attached' % self.prefix)]
+ self.assertEqual(exp_volumes, actual)
+
+
+class ExtendedVolumesXmlTest(ExtendedVolumesTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_volumes.Extended_volumes.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py
new file mode 100644
index 0000000000..f331da80fe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py
@@ -0,0 +1,256 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import fixed_ips as fixed_ips_v2
+from nova.api.openstack.compute.plugins.v3 import fixed_ips as fixed_ips_v21
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_network
+
+
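+# Three fixed IPs: two live addresses on 192.168.1.0/24 plus one deleted
+# address (10.0.0.2) used to exercise the not-found paths.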
+fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'instance': None,
+ 'network': test_network.fake_network,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'instance': None,
+ 'network': test_network.fake_network,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False},
+ {'id': 3,
+ 'address': '10.0.0.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 3,
+ 'instance_uuid': '3',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'instance': None,
+ 'network': test_network.fake_network,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': True},
+ ]
+
+
+def fake_fixed_ip_get_by_address(context, address, columns_to_join=None):
+ if address == 'inv.ali.d.ip':
+ msg = _("Invalid fixed IP Address %s in request") % address
+ raise exception.FixedIpInvalid(msg)
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address and not fixed_ip['deleted']:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address and not fixed_ip['deleted']:
+ return (fixed_ip, FakeModel(network), None)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+
+class FakeModel(object):
+ """Stubs out for model."""
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return '<FakeModel: %s>' % self.values
+
+
+def fake_network_get_all(context):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ return [FakeModel(network)]
+
+
+class FixedIpTestV21(test.NoDBTestCase):
+
+ fixed_ips = fixed_ips_v21
+ url = '/v2/fake/os-fixed-ips'
+
+ def setUp(self):
+ super(FixedIpTestV21, self).setUp()
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ self.context = context.get_admin_context()
+ self.controller = self.fixed_ips.FixedIPController()
+
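+    # The v2.1 reserve/unreserve actions return None and carry their
+    # status as ``wsgi_code`` on the method itself, while the v2 action
+    # returns a response object; hence the per-version assertion helpers
+    # and the ``result or action`` pattern in the tests below.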
+ def _assert_equal(self, ret, exp):
+ self.assertEqual(ret.wsgi_code, exp)
+
+ def _get_reserve_action(self):
+ return self.controller.reserve
+
+ def _get_unreserve_action(self):
+ return self.controller.unreserve
+
+ def test_fixed_ips_get(self):
+ req = fakes.HTTPRequest.blank('%s/192.168.1.1' % self.url)
+ res_dict = self.controller.show(req, '192.168.1.1')
+ response = {'fixed_ip': {'cidr': '192.168.1.0/24',
+ 'hostname': None,
+ 'host': None,
+ 'address': '192.168.1.1'}}
+ self.assertEqual(response, res_dict)
+
+ def test_fixed_ips_get_bad_ip_fail(self):
+ req = fakes.HTTPRequest.blank('%s/10.0.0.1' % self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
+ '10.0.0.1')
+
+ def test_fixed_ips_get_invalid_ip_address(self):
+ req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip' % self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.show, req,
+ 'inv.ali.d.ip')
+
+ def test_fixed_ips_get_deleted_ip_fail(self):
+ req = fakes.HTTPRequest.blank('%s/10.0.0.2' % self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
+ '10.0.0.2')
+
+ def test_fixed_ip_reserve(self):
+ fake_fixed_ips[0]['reserved'] = False
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
+ action = self._get_reserve_action()
+ result = action(req, "192.168.1.1", body)
+
+ self._assert_equal(result or action, 202)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], True)
+
+ def test_fixed_ip_reserve_bad_ip(self):
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
+ action = self._get_reserve_action()
+
+ self.assertRaises(webob.exc.HTTPNotFound, action, req,
+ '10.0.0.1', body)
+
+ def test_fixed_ip_reserve_invalid_ip_address(self):
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
+ action = self._get_reserve_action()
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ action, req, 'inv.ali.d.ip', body)
+
+ def test_fixed_ip_reserve_deleted_ip(self):
+ body = {'reserve': None}
+ action = self._get_reserve_action()
+
+ req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, action, req,
+ '10.0.0.2', body)
+
+ def test_fixed_ip_unreserve(self):
+ fake_fixed_ips[0]['reserved'] = True
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
+ action = self._get_unreserve_action()
+ result = action(req, "192.168.1.1", body)
+
+ self._assert_equal(result or action, 202)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], False)
+
+ def test_fixed_ip_unreserve_bad_ip(self):
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
+ action = self._get_unreserve_action()
+
+ self.assertRaises(webob.exc.HTTPNotFound, action, req,
+ '10.0.0.1', body)
+
+ def test_fixed_ip_unreserve_invalid_ip_address(self):
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
+ action = self._get_unreserve_action()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ action, req, 'inv.ali.d.ip', body)
+
+ def test_fixed_ip_unreserve_deleted_ip(self):
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
+ action = self._get_unreserve_action()
+ self.assertRaises(webob.exc.HTTPNotFound, action, req,
+ '10.0.0.2', body)
+
+
+class FixedIpTestV2(FixedIpTestV21):
+
+ fixed_ips = fixed_ips_v2
+
+ def _assert_equal(self, ret, exp):
+ self.assertEqual(ret.status, '202 Accepted')
+
+ def _get_reserve_action(self):
+ return self.controller.action
+
+ def _get_unreserve_action(self):
+ return self.controller.action
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py
new file mode 100644
index 0000000000..5718a826e4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py
@@ -0,0 +1,402 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import flavor_access \
+ as flavor_access_v2
+from nova.api.openstack.compute import flavors as flavors_api
+from nova.api.openstack.compute.plugins.v3 import flavor_access \
+ as flavor_access_v3
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def generate_flavor(flavorid, ispublic):
+ return {
+ 'id': flavorid,
+ 'flavorid': str(flavorid),
+ 'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'name': u'test',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
+ 'updated_at': None,
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'swap': 512,
+ 'rxtx_factor': 1.0,
+ 'disabled': False,
+ 'extra_specs': {},
+ 'deleted_at': None,
+ 'vcpu_weight': None,
+ 'is_public': bool(ispublic)
+ }
+
+
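+# Flavors 0 and 1 are public; 2 and 3 are private, with project access
+# granted per ACCESS_LIST below.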
+INSTANCE_TYPES = {
+ '0': generate_flavor(0, True),
+ '1': generate_flavor(1, True),
+ '2': generate_flavor(2, False),
+ '3': generate_flavor(3, False)}
+
+
+ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
+ {'flavor_id': '2', 'project_id': 'proj3'},
+ {'flavor_id': '3', 'project_id': 'proj3'}]
+
+
+def fake_get_flavor_access_by_flavor_id(context, flavorid):
+ res = []
+ for access in ACCESS_LIST:
+ if access['flavor_id'] == flavorid:
+ res.append(access)
+ return res
+
+
+def fake_get_flavor_by_flavor_id(context, flavorid, read_deleted=None):
+ return INSTANCE_TYPES[flavorid]
+
+
+def _has_flavor_access(flavorid, projectid):
+ for access in ACCESS_LIST:
+ if access['flavor_id'] == flavorid and \
+ access['project_id'] == projectid:
+ return True
+ return False
+
+
+def fake_get_all_flavors_sorted_list(context, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
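+    # is_public=None disables the public/private filter; otherwise return
+    # flavors matching the flag, plus (for is_public=True) any private
+    # flavor the caller's project has been granted access to.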
+ if filters is None or filters['is_public'] is None:
+ return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
+
+ res = {}
+ for k, v in INSTANCE_TYPES.iteritems():
+ if filters['is_public'] and _has_flavor_access(k, context.project_id):
+ res.update({k: v})
+ continue
+ if v['is_public'] == filters['is_public']:
+ res.update({k: v})
+
+ res = sorted(res.values(), key=lambda item: item[sort_key])
+ return res
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+ def get_db_flavor(self, flavor_id):
+ return INSTANCE_TYPES[flavor_id]
+
+
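+# Response stub pre-seeded with one public flavor ('0') and one private
+# flavor ('2') so show/detail/create can verify that the
+# os-flavor-access:is_public attribute gets attached.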
+class FakeResponse(object):
+ obj = {'flavor': {'id': '0'},
+ 'flavors': [
+ {'id': '0'},
+ {'id': '2'}]
+ }
+
+ def attach(self, **kwargs):
+ pass
+
+
+class FlavorAccessTestV21(test.NoDBTestCase):
+ api_version = "2.1"
+ FlavorAccessController = flavor_access_v3.FlavorAccessController
+ FlavorActionController = flavor_access_v3.FlavorActionController
+ _prefix = "/v3"
+ validation_ex = exception.ValidationError
+
+ def setUp(self):
+ super(FlavorAccessTestV21, self).setUp()
+ self.flavor_controller = flavors_api.Controller()
+ self.req = FakeRequest()
+ self.context = self.req.environ['nova.context']
+ self.stubs.Set(db, 'flavor_get_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+ self.stubs.Set(db, 'flavor_get_all',
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(db, 'flavor_access_get_by_flavor_id',
+ fake_get_flavor_access_by_flavor_id)
+
+ self.flavor_access_controller = self.FlavorAccessController()
+ self.flavor_action_controller = self.FlavorActionController()
+
+ def _verify_flavor_list(self, result, expected):
+ # result already sorted by flavor_id
+ self.assertEqual(len(result), len(expected))
+
+ for d1, d2 in zip(result, expected):
+ self.assertEqual(d1['id'], d2['id'])
+
+ def test_list_flavor_access_public(self):
+        # querying os-flavor-access on a public flavor should return 404
+ self.assertRaises(exc.HTTPNotFound,
+ self.flavor_access_controller.index,
+ self.req, '1')
+
+ def test_list_flavor_access_private(self):
+ expected = {'flavor_access': [
+ {'flavor_id': '2', 'tenant_id': 'proj2'},
+ {'flavor_id': '2', 'tenant_id': 'proj3'}]}
+ result = self.flavor_access_controller.index(self.req, '2')
+ self.assertEqual(result, expected)
+
+ def test_list_with_no_context(self):
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/fake/flavors')
+
+ def fake_authorize(context, target=None, action=None):
+ raise exception.PolicyNotAuthorized(action='index')
+
+ if self.api_version == "2.1":
+ self.stubs.Set(flavor_access_v3,
+ 'authorize',
+ fake_authorize)
+ else:
+ self.stubs.Set(flavor_access_v2,
+ 'authorize',
+ fake_authorize)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.flavor_access_controller.index,
+ req, 'fake')
+
+ def test_list_flavor_with_admin_default_proj1(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank(self._prefix + '/fake/flavors',
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj1'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_default_proj2(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj2'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_true(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ url = self._prefix + '/flavors?is_public=true'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_false(self):
+ expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
+ url = self._prefix + '/flavors?is_public=false'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_false_proj2(self):
+ expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
+ url = self._prefix + '/flavors?is_public=false'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=True)
+ req.environ['nova.context'].project_id = 'proj2'
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_admin_ispublic_none(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
+ {'id': '3'}]}
+ url = self._prefix + '/flavors?is_public=none'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=True)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_default(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_true(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ url = self._prefix + '/flavors?is_public=true'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_false(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ url = self._prefix + '/flavors?is_public=false'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_list_flavor_with_no_admin_ispublic_none(self):
+ expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
+ url = self._prefix + '/flavors?is_public=none'
+ req = fakes.HTTPRequest.blank(url,
+ use_admin_context=False)
+ result = self.flavor_controller.index(req)
+ self._verify_flavor_list(result['flavors'], expected['flavors'])
+
+ def test_show(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.show(self.req, resp, '0')
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
+ resp.obj['flavor'])
+ self.flavor_action_controller.show(self.req, resp, '2')
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
+ resp.obj['flavor'])
+
+ def test_detail(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.detail(self.req, resp)
+ self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
+ {'id': '2', 'os-flavor-access:is_public': False}],
+ resp.obj['flavors'])
+
+ def test_create(self):
+ resp = FakeResponse()
+ self.flavor_action_controller.create(self.req, {}, resp)
+ self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
+ resp.obj['flavor'])
+
+ def _get_add_access(self):
+ if self.api_version == "2.1":
+ return self.flavor_action_controller._add_tenant_access
+ else:
+ return self.flavor_action_controller._addTenantAccess
+
+ def _get_remove_access(self):
+ if self.api_version == "2.1":
+ return self.flavor_action_controller._remove_tenant_access
+ else:
+ return self.flavor_action_controller._removeTenantAccess
+
+ def test_add_tenant_access(self):
+ def stub_add_flavor_access(context, flavorid, projectid):
+ self.assertEqual('3', flavorid, "flavorid")
+ self.assertEqual("proj2", projectid, "projectid")
+ self.stubs.Set(db, 'flavor_access_add',
+ stub_add_flavor_access)
+ expected = {'flavor_access':
+ [{'flavor_id': '3', 'tenant_id': 'proj3'}]}
+ body = {'addTenantAccess': {'tenant': 'proj2'}}
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+
+ add_access = self._get_add_access()
+ result = add_access(req, '3', body=body)
+ self.assertEqual(result, expected)
+
+ def test_add_tenant_access_with_no_admin_user(self):
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=False)
+ body = {'addTenantAccess': {'tenant': 'proj2'}}
+ add_access = self._get_add_access()
+ self.assertRaises(exception.PolicyNotAuthorized,
+ add_access, req, '2', body=body)
+
+ def test_add_tenant_access_with_no_tenant(self):
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'addTenantAccess': {'foo': 'proj2'}}
+ add_access = self._get_add_access()
+ self.assertRaises(self.validation_ex,
+ add_access, req, '2', body=body)
+ body = {'addTenantAccess': {'tenant': ''}}
+ self.assertRaises(self.validation_ex,
+ add_access, req, '2', body=body)
+
+ def test_add_tenant_access_with_already_added_access(self):
+ def stub_add_flavor_access(context, flavorid, projectid):
+ raise exception.FlavorAccessExists(flavor_id=flavorid,
+ project_id=projectid)
+ self.stubs.Set(db, 'flavor_access_add',
+ stub_add_flavor_access)
+ body = {'addTenantAccess': {'tenant': 'proj2'}}
+ add_access = self._get_add_access()
+ self.assertRaises(exc.HTTPConflict,
+ add_access, self.req, '3', body=body)
+
+ def test_remove_tenant_access_with_bad_access(self):
+ def stub_remove_flavor_access(context, flavorid, projectid):
+ raise exception.FlavorAccessNotFound(flavor_id=flavorid,
+ project_id=projectid)
+ self.stubs.Set(db, 'flavor_access_remove',
+ stub_remove_flavor_access)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+ remove_access = self._get_remove_access()
+ self.assertRaises(exc.HTTPNotFound,
+ remove_access, self.req, '3', body=body)
+
+ def test_delete_tenant_access_with_no_tenant(self):
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ remove_access = self._get_remove_access()
+ body = {'removeTenantAccess': {'foo': 'proj2'}}
+ self.assertRaises(self.validation_ex,
+ remove_access, req, '2', body=body)
+ body = {'removeTenantAccess': {'tenant': ''}}
+ self.assertRaises(self.validation_ex,
+ remove_access, req, '2', body=body)
+
+ def test_remove_tenant_access_with_no_admin_user(self):
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=False)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+ remove_access = self._get_remove_access()
+ self.assertRaises(exception.PolicyNotAuthorized,
+ remove_access, req, '2', body=body)
+
+
+class FlavorAccessTestV20(FlavorAccessTestV21):
+ api_version = "2.0"
+ FlavorAccessController = flavor_access_v2.FlavorAccessController
+ FlavorActionController = flavor_access_v2.FlavorActionController
+ _prefix = "/v2/fake"
+ validation_ex = exc.HTTPBadRequest
+
+
+class FlavorAccessSerializerTest(test.NoDBTestCase):
+ def test_serializer_empty(self):
+ serializer = flavor_access_v2.FlavorAccessTemplate()
+ text = serializer.serialize(dict(flavor_access=[]))
+ tree = etree.fromstring(text)
+ self.assertEqual(len(tree), 0)
+
+ def test_serializer(self):
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<flavor_access>'
+ '<access tenant_id="proj2" flavor_id="2"/>'
+ '<access tenant_id="proj3" flavor_id="2"/>'
+ '</flavor_access>')
+ access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
+ {'flavor_id': '2', 'tenant_id': 'proj3'}]
+
+ serializer = flavor_access_v2.FlavorAccessTemplate()
+ text = serializer.serialize(dict(flavor_access=access_list))
+ self.assertEqual(text, expected)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py
new file mode 100644
index 0000000000..a646f43fd1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py
@@ -0,0 +1,127 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import flavor_disabled
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "swap": 512,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": False,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '20',
+ "swap": None,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": True,
+ },
+}
+
+
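+# Stub out the flavor lookups so the extension is exercised against the
+# FAKE_FLAVORS fixture above instead of the database.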
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_flavor_get_by_flavor_id(1),
+ fake_flavor_get_by_flavor_id(2)
+ ]
+
+
+class FlavorDisabledTestV21(test.NoDBTestCase):
+ base_url = '/v2/fake/flavors'
+ content_type = 'application/json'
+ prefix = "OS-FLV-DISABLED:"
+
+ def setUp(self):
+ super(FlavorDisabledTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavor_disabled.Flavor_disabled')
+ self.flags(osapi_compute_extension=[ext])
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors',)))
+ return res
+
+ def _get_flavor(self, body):
+ return jsonutils.loads(body).get('flavor')
+
+ def _get_flavors(self, body):
+ return jsonutils.loads(body).get('flavors')
+
+ def assertFlavorDisabled(self, flavor, disabled):
+ self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)
+
+ def test_show(self):
+ url = self.base_url + '/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertFlavorDisabled(self._get_flavor(res.body), 'False')
+
+ def test_detail(self):
+ url = self.base_url + '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ flavors = self._get_flavors(res.body)
+ self.assertFlavorDisabled(flavors[0], 'False')
+ self.assertFlavorDisabled(flavors[1], 'True')
+
+
+class FlavorDisabledTestV2(FlavorDisabledTestV21):
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app())
+ return res
+
+
+class FlavorDisabledXmlTest(FlavorDisabledTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % flavor_disabled.Flavor_disabled.namespace
+
+ def _get_flavor(self, body):
+ return etree.XML(body)
+
+ def _get_flavors(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py
new file mode 100644
index 0000000000..3d44e4970b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py
@@ -0,0 +1,465 @@
+# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import flavor_access
+from nova.api.openstack.compute.contrib import flavormanage as flavormanage_v2
+from nova.api.openstack.compute.plugins.v3 import flavor_manage as \
+ flavormanage_v21
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
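+# Template DB-style flavor record; individual tests override fields via
+# keyword arguments.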
+def fake_db_flavor(**updates):
+ db_flavor = {
+ 'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'name': u'frob',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
+ 'updated_at': None,
+ 'memory_mb': 256,
+ 'vcpus': 1,
+ 'flavorid': 1,
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'extra_specs': {},
+ 'deleted_at': None,
+ 'vcpu_weight': None,
+ 'id': 7,
+ 'is_public': True,
+ 'disabled': False,
+ }
+ if updates:
+ db_flavor.update(updates)
+ return db_flavor
+
+
+def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
+ if flavorid == 'failtest':
+ raise exception.FlavorNotFound(flavor_id=flavorid)
+    elif str(flavorid) != '1234':
+ raise Exception("This test expects flavorid 1234, not %s" % flavorid)
+ if read_deleted != 'no':
+ raise test.TestingException("Should not be reading deleted")
+ return fake_db_flavor(flavorid=flavorid)
+
+
+def fake_destroy(flavorname):
+ pass
+
+
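+# Minimal stand-in for db.flavor_create: echo the requested values back as a
+# flavor record, defaulting the flavorid to 1234 when none is supplied.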
+def fake_create(context, kwargs, projects=None):
+ newflavor = fake_db_flavor()
+
+ flavorid = kwargs.get('flavorid')
+ if flavorid is None:
+ flavorid = 1234
+
+ newflavor['flavorid'] = flavorid
+ newflavor["name"] = kwargs.get('name')
+ newflavor["memory_mb"] = int(kwargs.get('memory_mb'))
+ newflavor["vcpus"] = int(kwargs.get('vcpus'))
+ newflavor["root_gb"] = int(kwargs.get('root_gb'))
+ newflavor["ephemeral_gb"] = int(kwargs.get('ephemeral_gb'))
+ newflavor["swap"] = kwargs.get('swap')
+ newflavor["rxtx_factor"] = float(kwargs.get('rxtx_factor'))
+ newflavor["is_public"] = bool(kwargs.get('is_public'))
+ newflavor["disabled"] = bool(kwargs.get('disabled'))
+
+ return newflavor
+
+
+class FlavorManageTestV21(test.NoDBTestCase):
+ controller = flavormanage_v21.FlavorManageController()
+ validation_error = exception.ValidationError
+ base_url = '/v2/fake/flavors'
+
+ def setUp(self):
+ super(FlavorManageTestV21, self).setUp()
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_get_flavor_by_flavor_id)
+ self.stubs.Set(flavors, "destroy", fake_destroy)
+ self.stubs.Set(db, "flavor_create", fake_create)
+ self.ctxt = context.RequestContext('fake', 'fake',
+ is_admin=True, auth_token=True)
+ self.app = self._setup_app()
+
+ self.request_body = {
+ "flavor": {
+ "name": "test",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "id": unicode('1234'),
+ "swap": 512,
+ "rxtx_factor": 1,
+ "os-flavor-access:is_public": True,
+ }
+ }
+ self.expected_flavor = self.request_body
+
+ def _setup_app(self):
+ return fakes.wsgi_app_v21(init_only=('flavor-manage', 'os-flavor-rxtx',
+ 'os-flavor-access', 'flavors',
+ 'os-flavor-extra-data'))
+
+ def test_delete(self):
+ req = fakes.HTTPRequest.blank(self.base_url + '/1234')
+ res = self.controller._delete(req, 1234)
+
+        # NOTE: in v2.1 the HTTP status code is set as the wsgi_code of the
+        # API method instead of as status_int on the response object.
+ if isinstance(self.controller,
+ flavormanage_v21.FlavorManageController):
+ status_int = self.controller._delete.wsgi_code
+ else:
+ status_int = res.status_int
+ self.assertEqual(202, status_int)
+
+ # subsequent delete should fail
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._delete, req, "failtest")
+
+ def _test_create_missing_parameter(self, parameter):
+ body = {
+ "flavor": {
+ "name": "azAZ09. -_",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "id": unicode('1234'),
+ "swap": 512,
+ "rxtx_factor": 1,
+ "os-flavor-access:is_public": True,
+ }
+ }
+
+ del body['flavor'][parameter]
+
+ req = fakes.HTTPRequest.blank(self.base_url)
+ self.assertRaises(self.validation_error, self.controller._create,
+ req, body=body)
+
+ def test_create_missing_name(self):
+ self._test_create_missing_parameter('name')
+
+ def test_create_missing_ram(self):
+ self._test_create_missing_parameter('ram')
+
+ def test_create_missing_vcpus(self):
+ self._test_create_missing_parameter('vcpus')
+
+ def test_create_missing_disk(self):
+ self._test_create_missing_parameter('disk')
+
+ def _create_flavor_success_case(self, body):
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_code)
+ return jsonutils.loads(res.body)
+
+ def test_create(self):
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def test_create_public_default(self):
+ del self.request_body['flavor']['os-flavor-access:is_public']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def test_create_without_flavorid(self):
+ del self.request_body['flavor']['id']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def _create_flavor_bad_request_case(self, body):
+ self.stubs.UnsetAll()
+
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_code, 400)
+
+ def test_create_invalid_name(self):
+ self.request_body['flavor']['name'] = 'bad !@#!$% name'
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_flavor_name_is_whitespace(self):
+ self.request_body['flavor']['name'] = ' '
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_name_too_long(self):
+ self.request_body['flavor']['name'] = 'a' * 256
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_flavorname(self):
+ del self.request_body['flavor']['name']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_empty_body(self):
+ body = {
+ "flavor": {}
+ }
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_no_body(self):
+ body = {}
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_invalid_format_body(self):
+ body = {
+ "flavor": []
+ }
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_invalid_flavorid(self):
+ self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_check_flavor_id_length(self):
+ MAX_LENGTH = 255
+ self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
+ self.request_body['flavor']['id'] = " bad_id "
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_ram(self):
+ del self.request_body['flavor']['ram']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_ram(self):
+ self.request_body['flavor']['ram'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_vcpus(self):
+ del self.request_body['flavor']['vcpus']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_vcpus(self):
+ self.request_body['flavor']['vcpus'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_disk(self):
+ del self.request_body['flavor']['disk']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_disk(self):
+ self.request_body['flavor']['disk'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_ephemeral(self):
+ self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_swap(self):
+ self.request_body['flavor']['swap'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_rxtx_factor(self):
+ self.request_body['flavor']['rxtx_factor'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_non_boolean_is_public(self):
+ self.request_body['flavor']['os-flavor-access:is_public'] = 123
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_flavor_exists_exception_returns_409(self):
+ expected = {
+ "flavor": {
+ "name": "test",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "id": 1235,
+ "swap": 512,
+ "rxtx_factor": 1,
+ "os-flavor-access:is_public": True,
+ }
+ }
+
+ def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
+ flavorid, swap, rxtx_factor, is_public):
+ raise exception.FlavorExists(name=name)
+
+ self.stubs.Set(flavors, "create", fake_create)
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(expected)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
+
+ @mock.patch('nova.compute.flavors.create',
+ side_effect=exception.FlavorCreateFailed)
+ def test_flavor_create_db_failed(self, mock_create):
+ request_dict = {
+ "flavor": {
+ "name": "test",
+ 'id': "12345",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "swap": 512,
+ "rxtx_factor": 1,
+ "os-flavor-access:is_public": True,
+ }
+ }
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(request_dict)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 500)
+ self.assertIn('Unable to create flavor', res.body)
+
+    def test_invalid_memory_mb(self):
+        """Check that negative and decimal numbers can't be accepted."""
+
+ self.stubs.UnsetAll()
+ self.assertRaises(exception.InvalidInput, flavors.create, "abc",
+ -512, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
+ 512.2, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
+ None, 2, 1, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+ 512, 2, None, 1, 1234, 512, 1, True)
+ self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+ "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
+
+
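+# Bare-bones request object carrying an admin context, used to call the
+# flavor access controller directly.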
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+
+class PrivateFlavorManageTestV21(test.TestCase):
+ controller = flavormanage_v21.FlavorManageController()
+ base_url = '/v2/fake/flavors'
+
+ def setUp(self):
+ super(PrivateFlavorManageTestV21, self).setUp()
+ self.flavor_access_controller = flavor_access.FlavorAccessController()
+ self.ctxt = context.RequestContext('fake', 'fake',
+ is_admin=True, auth_token=True)
+ self.app = self._setup_app()
+ self.expected = {
+ "flavor": {
+ "name": "test",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "swap": 512,
+ "rxtx_factor": 1
+ }
+ }
+
+ def _setup_app(self):
+ return fakes.wsgi_app_v21(init_only=('flavor-manage',
+ 'os-flavor-access',
+ 'os-flavor-rxtx', 'flavors',
+ 'os-flavor-extra-data'),
+ fake_auth_context=self.ctxt)
+
+ def _get_response(self):
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(self.expected)
+ res = req.get_response(self.app)
+ return jsonutils.loads(res.body)
+
+ def test_create_private_flavor_should_not_grant_flavor_access(self):
+ self.expected["flavor"]["os-flavor-access:is_public"] = False
+ body = self._get_response()
+ for key in self.expected["flavor"]:
+ self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
+ flavor_access_body = self.flavor_access_controller.index(
+ FakeRequest(), body["flavor"]["id"])
+ expected_flavor_access_body = {
+ "tenant_id": "%s" % self.ctxt.project_id,
+ "flavor_id": "%s" % body["flavor"]["id"]
+ }
+ self.assertNotIn(expected_flavor_access_body,
+ flavor_access_body["flavor_access"])
+
+ def test_create_public_flavor_should_not_create_flavor_access(self):
+ self.expected["flavor"]["os-flavor-access:is_public"] = True
+ self.mox.StubOutWithMock(flavors, "add_flavor_access")
+ self.mox.ReplayAll()
+ body = self._get_response()
+ for key in self.expected["flavor"]:
+ self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
+
+
+class FlavorManageTestV2(FlavorManageTestV21):
+ controller = flavormanage_v2.FlavorManageController()
+ validation_error = webob.exc.HTTPBadRequest
+
+ def setUp(self):
+ super(FlavorManageTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
+ 'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
+
+ def _setup_app(self):
+ return fakes.wsgi_app(init_only=('flavors',),
+ fake_auth_context=self.ctxt)
+
+
+class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
+ controller = flavormanage_v2.FlavorManageController()
+
+ def setUp(self):
+ super(PrivateFlavorManageTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
+ 'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
+
+ def _setup_app(self):
+ return fakes.wsgi_app(init_only=('flavors',),
+ fake_auth_context=self.ctxt)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py
new file mode 100644
index 0000000000..a8f31653c1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py
@@ -0,0 +1,127 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "swap": '5',
+ "disabled": False,
+ "ephemeral_gb": '20',
+ "rxtx_factor": '1.0',
+ "vcpus": 1,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '10',
+ "swap": '10',
+ "ephemeral_gb": '25',
+ "rxtx_factor": None,
+ "disabled": False,
+ "vcpus": 1,
+ },
+}
+
+
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_flavor_get_by_flavor_id(1),
+ fake_flavor_get_by_flavor_id(2)
+ ]
+
+
+class FlavorRxtxTestV21(test.NoDBTestCase):
+ content_type = 'application/json'
+ _prefix = "/v2/fake"
+
+ def setUp(self):
+ super(FlavorRxtxTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavor_rxtx.Flavor_rxtx')
+ self.flags(osapi_compute_extension=[ext])
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(self._get_app())
+ return res
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers',
+ 'flavors', 'os-flavor-rxtx'))
+
+ def _get_flavor(self, body):
+ return jsonutils.loads(body).get('flavor')
+
+ def _get_flavors(self, body):
+ return jsonutils.loads(body).get('flavors')
+
+ def assertFlavorRxtx(self, flavor, rxtx):
+ self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
+
+ def test_show(self):
+ url = self._prefix + '/flavors/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
+
+ def test_detail(self):
+ url = self._prefix + '/flavors/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ flavors = self._get_flavors(res.body)
+ self.assertFlavorRxtx(flavors[0], '1.0')
+ self.assertFlavorRxtx(flavors[1], '')
+
+
+class FlavorRxtxTestV20(FlavorRxtxTestV21):
+
+ def _get_app(self):
+ return fakes.wsgi_app()
+
+
+class FlavorRxtxXmlTest(FlavorRxtxTestV20):
+ content_type = 'application/xml'
+
+ def _get_flavor(self, body):
+ return etree.XML(body)
+
+ def _get_flavors(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py
new file mode 100644
index 0000000000..f168db060a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py
@@ -0,0 +1,126 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "swap": 512,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": False,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '10',
+ "swap": None,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": False,
+ },
+}
+
+
+# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor*
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_flavor_get_by_flavor_id(1),
+ fake_flavor_get_by_flavor_id(2)
+ ]
+
+
+class FlavorSwapTestV21(test.NoDBTestCase):
+ base_url = '/v2/fake/flavors'
+ content_type = 'application/json'
+ prefix = ''
+
+ def setUp(self):
+ super(FlavorSwapTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavor_swap.Flavor_swap')
+ self.flags(osapi_compute_extension=[ext])
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors',)))
+ return res
+
+ def _get_flavor(self, body):
+ return jsonutils.loads(body).get('flavor')
+
+ def _get_flavors(self, body):
+ return jsonutils.loads(body).get('flavors')
+
+ def assertFlavorSwap(self, flavor, swap):
+ self.assertEqual(str(flavor.get('%sswap' % self.prefix)), swap)
+
+ def test_show(self):
+ url = self.base_url + '/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertFlavorSwap(self._get_flavor(res.body), '512')
+
+ def test_detail(self):
+ url = self.base_url + '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ flavors = self._get_flavors(res.body)
+ self.assertFlavorSwap(flavors[0], '512')
+ self.assertFlavorSwap(flavors[1], '')
+
+
+class FlavorSwapTestV2(FlavorSwapTestV21):
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app())
+ return res
+
+
+class FlavorSwapXmlTest(FlavorSwapTestV2):
+ content_type = 'application/xml'
+
+ def _get_flavor(self, body):
+ return etree.XML(body)
+
+ def _get_flavors(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py
new file mode 100644
index 0000000000..1299b6c88d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py
@@ -0,0 +1,127 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get_flavor_by_flavor_id(flavorid, ctxt=None):
+ return {
+ 'id': flavorid,
+ 'flavorid': str(flavorid),
+ 'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'name': u'test',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
+ 'updated_at': None,
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'extra_specs': {},
+ 'deleted_at': None,
+ 'vcpu_weight': None,
+ 'swap': 0,
+ 'disabled': False,
+ }
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_get_flavor_by_flavor_id(1),
+ fake_get_flavor_by_flavor_id(2)
+ ]
+
+
+class FlavorExtraDataTestV21(test.NoDBTestCase):
+ base_url = '/v2/fake/flavors'
+
+ def setUp(self):
+ super(FlavorExtraDataTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavorextradata.Flavorextradata')
+ self.flags(osapi_compute_extension=[ext])
+ self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+ self.stubs.Set(flavors, 'get_all_flavors_sorted_list',
+ fake_get_all_flavors_sorted_list)
+ self._setup_app()
+
+ def _setup_app(self):
+        self.app = fakes.wsgi_app_v21(init_only=('flavors',))
+
+ def _verify_flavor_response(self, flavor, expected):
+ for key in expected:
+ self.assertEqual(flavor[key], expected[key])
+
+ def test_show(self):
+ expected = {
+ 'flavor': {
+ 'id': '1',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ }
+ }
+
+ url = self.base_url + '/1'
+ req = webob.Request.blank(url)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ body = jsonutils.loads(res.body)
+ self._verify_flavor_response(body['flavor'], expected['flavor'])
+
+ def test_detail(self):
+ expected = [
+ {
+ 'id': '1',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ },
+ {
+ 'id': '2',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ },
+ ]
+
+ url = self.base_url + '/detail'
+ req = webob.Request.blank(url)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ body = jsonutils.loads(res.body)
+ for i, flavor in enumerate(body['flavors']):
+ self._verify_flavor_response(flavor, expected[i])
+
+
+class FlavorExtraDataTestV2(FlavorExtraDataTestV21):
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app(init_only=('flavors',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py
new file mode 100644
index 0000000000..8a6f4814a8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py
@@ -0,0 +1,403 @@
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import flavorextraspecs \
+ as flavorextraspecs_v2
+from nova.api.openstack.compute.plugins.v3 import flavors_extraspecs \
+ as flavorextraspecs_v21
+import nova.db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_flavor
+
+
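+# Stubs for the nova.db extra-specs calls that the controllers end up making;
+# they return canned values so no database is needed.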
+def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs(context, flavor_id):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs_item(context, flavor_id, key):
+ return {key: stub_flavor_extra_specs()[key]}
+
+
+def return_empty_flavor_extra_specs(context, flavor_id):
+ return {}
+
+
+def delete_flavor_extra_specs(context, flavor_id, key):
+ pass
+
+
+def stub_flavor_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
+class FlavorsExtraSpecsTestV21(test.TestCase):
+ bad_request = exception.ValidationError
+ flavorextraspecs = flavorextraspecs_v21
+
+ def _get_request(self, url, use_admin_context=False):
+ req_url = '/v2/fake/flavors/' + url
+ return fakes.HTTPRequest.blank(req_url,
+ use_admin_context=use_admin_context)
+
+ def setUp(self):
+ super(FlavorsExtraSpecsTestV21, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
+
+ def test_index(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key1': 'value1'})
+
+ req = self._get_request('1/os-extra_specs')
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ res_dict = self.controller.index(req, 1)
+
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs')
+ res_dict = self.controller.index(req, 1)
+
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key5': 'value5'})
+ req = self._get_request('1/os-extra_specs/key5')
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ res_dict = self.controller.show(req, 1, 'key5')
+
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key6')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key6')
+
+ def test_not_found_because_flavor(self):
+ req = self._get_request('1/os-extra_specs/key5',
+ use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
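+# The XML variant reads attributes in Clark notation ({namespace}disabled),
+# hence the namespace-derived prefix below.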
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key5')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, 1, 'key5', body={'key5': 'value5'})
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1, 'key5')
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, 1, body={'extra_specs': {'key5': 'value5'}})
+
+ def test_delete(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key5': 'value5'})
+ self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key5',
+ use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ self.controller.delete(req, 1, 'key5')
+
+ def test_delete_no_admin(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key5')
+ self.assertRaises(exception.Forbidden, self.controller.delete,
+ req, 1, 'key 5')
+
+ def test_delete_spec_not_found(self):
+ req = self._get_request('1/os-extra_specs/key6',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1, 'key6')
+
+ def test_create(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ res_dict = self.controller.create(req, 1, body=body)
+
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+ self.assertEqual(0.5, res_dict['extra_specs']['key2'])
+ self.assertEqual(5, res_dict['extra_specs']['key3'])
+
+ def test_create_no_admin(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"extra_specs": {"key1": "value1"}}
+
+ req = self._get_request('1/os-extra_specs')
+ self.assertRaises(exception.Forbidden, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_flavor_not_found(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorNotFound(flavor_id='')
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"extra_specs": {"key1": "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_flavor_db_duplicate(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"extra_specs": {"key1": "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
+ req, 1, body=body)
+
+ def _test_create_bad_request(self, body):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_empty_body(self):
+ self._test_create_bad_request('')
+
+ def test_create_non_dict_extra_specs(self):
+ self._test_create_bad_request({"extra_specs": "non_dict"})
+
+ def test_create_non_string_key(self):
+ self._test_create_bad_request({"extra_specs": {None: "value1"}})
+
+ def test_create_non_string_value(self):
+ self._test_create_bad_request({"extra_specs": {"key1": None}})
+
+ def test_create_zero_length_key(self):
+ self._test_create_bad_request({"extra_specs": {"": "value1"}})
+
+ def test_create_long_key(self):
+ key = "a" * 256
+ self._test_create_bad_request({"extra_specs": {key: "value1"}})
+
+ def test_create_long_value(self):
+ value = "a" * 256
+ self._test_create_bad_request({"extra_specs": {"key1": value}})
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_really_long_integer_value(self, mock_flavor_extra_specs):
+ value = 10 ** 1000
+        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, 1, body={"extra_specs": {"key1": value}})
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
+ invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
+        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
+
+ for key in invalid_keys:
+ body = {"extra_specs": {key: "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, 1, body=body)
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_valid_specs_key(self, mock_flavor_extra_specs):
+ valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
+        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
+
+ for key in valid_keys:
+ body = {"extra_specs": {key: "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ res_dict = self.controller.create(req, 1, body=body)
+ self.assertEqual('value1', res_dict['extra_specs'][key])
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 1, 'key1', body=body)
+
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_no_admin(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1')
+ self.assertRaises(exception.Forbidden, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def _test_update_item_bad_request(self, body):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_item_empty_body(self):
+ self._test_update_item_bad_request('')
+
+ def test_update_item_too_many_keys(self):
+ body = {"key1": "value1", "key2": "value2"}
+ self._test_update_item_bad_request(body)
+
+ def test_update_item_non_dict_extra_specs(self):
+ self._test_update_item_bad_request("non_dict")
+
+ def test_update_item_non_string_key(self):
+ self._test_update_item_bad_request({None: "value1"})
+
+ def test_update_item_non_string_value(self):
+ self._test_update_item_bad_request({"key1": None})
+
+ def test_update_item_zero_length_key(self):
+ self._test_update_item_bad_request({"": "value1"})
+
+ def test_update_item_long_key(self):
+ key = "a" * 256
+ self._test_update_item_bad_request({key: "value1"})
+
+ def test_update_item_long_value(self):
+ value = "a" * 256
+ self._test_update_item_bad_request({"key1": value})
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'bad', body=body)
+
+ def test_update_flavor_not_found(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorNotFound(flavor_id='')
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_flavor_db_duplicate(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_really_long_integer_value(self):
+ value = 10 ** 1000
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', body={"key1": value})
+
+
+class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+ flavorextraspecs = flavorextraspecs_v2
+
+
+class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
+ def test_serializer(self):
+ serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_specs><key1>value1</key1></extra_specs>')
+ text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
+ self.assertEqual(text, expected)
+
+ def test_show_update_serializer(self):
+ serializer = flavorextraspecs_v2.ExtraSpecTemplate()
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_spec key="key1">value1</extra_spec>')
+ text = serializer.serialize(dict({"key1": "value1"}))
+ self.assertEqual(text, expected)
+
+ def test_serializer_with_colon_tagname(self):
+ # Our test object to serialize
+ obj = {'extra_specs': {'foo:bar': '999'}}
+ serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+ '</extra_specs>'))
+ result = serializer.serialize(obj)
+ self.assertEqual(expected_xml, result)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py
new file mode 100644
index 0000000000..9a68e0de60
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py
@@ -0,0 +1,412 @@
+# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ip_dns as fipdns_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ip_dns as \
+ fipdns_v21
+from nova import context
+from nova import db
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+name = "arbitraryname"
+name2 = "anotherarbitraryname"
+
+test_ipv4_address = '10.0.0.66'
+test_ipv4_address2 = '10.0.0.67'
+
+test_ipv6_address = 'fe80:0:0:0:0:0:a00:42'
+
+domain = "example.org"
+domain2 = "example.net"
+floating_ip_id = '1'
+
+
+def _quote_domain(domain):
+ """Domain names tend to have .'s in them. Urllib doesn't quote dots,
+ but Routes tends to choke on them, so we need an extra level of
+ by-hand quoting here. This function needs to duplicate the one in
+ python-novaclient/novaclient/v1_1/floating_ip_dns.py
+ """
+ return urllib.quote(domain.replace('.', '%2E'))
+
+
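+# Stubs for the pieces of nova.network.api.API that the floating IP DNS
+# extension calls; setUp() wires them in with self.stubs.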
+def network_api_get_floating_ip(self, context, id):
+ return {'id': floating_ip_id, 'address': test_ipv4_address,
+ 'fixed_ip': None}
+
+
+def network_get_dns_domains(self, context):
+ return [{'domain': 'example.org', 'scope': 'public'},
+ {'domain': 'example.com', 'scope': 'public',
+ 'project': 'project1'},
+ {'domain': 'private.example.com', 'scope': 'private',
+ 'availability_zone': 'avzone'}]
+
+
+def network_get_dns_entries_by_address(self, context, address, domain):
+ return [name, name2]
+
+
+def network_get_dns_entries_by_name(self, context, address, domain):
+ return [test_ipv4_address]
+
+
+def network_add_dns_entry(self, context, address, name, dns_type, domain):
+ return {'dns_entry': {'ip': test_ipv4_address,
+ 'name': name,
+ 'type': dns_type,
+ 'domain': domain}}
+
+
+def network_modify_dns_entry(self, context, address, name, domain):
+ return {'dns_entry': {'name': name,
+ 'ip': address,
+ 'domain': domain}}
+
+
+def network_create_private_dns_domain(self, context, domain, avail_zone):
+ pass
+
+
+def network_create_public_dns_domain(self, context, domain, project):
+ pass
+
+
+class FloatingIpDNSTestV21(test.TestCase):
+ floating_ip_dns = fipdns_v21
+
+ def _create_floating_ip(self):
+ """Create a floating ip object."""
+ host = "fake_host"
+ db.floating_ip_create(self.context,
+ {'address': test_ipv4_address,
+ 'host': host})
+ db.floating_ip_create(self.context,
+ {'address': test_ipv6_address,
+ 'host': host})
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, test_ipv4_address)
+ db.floating_ip_destroy(self.context, test_ipv6_address)
+
+    def _check_status(self, expected_status, res, controller_method):
+        self.assertEqual(expected_status, controller_method.wsgi_code)
+
+ def _bad_request(self):
+ return webob.exc.HTTPBadRequest
+
+ def setUp(self):
+ super(FloatingIpDNSTestV21, self).setUp()
+ self.stubs.Set(network.api.API, "get_dns_domains",
+ network_get_dns_domains)
+ self.stubs.Set(network.api.API, "get_dns_entries_by_address",
+ network_get_dns_entries_by_address)
+ self.stubs.Set(network.api.API, "get_dns_entries_by_name",
+ network_get_dns_entries_by_name)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "add_dns_entry",
+ network_add_dns_entry)
+ self.stubs.Set(network.api.API, "modify_dns_entry",
+ network_modify_dns_entry)
+ self.stubs.Set(network.api.API, "create_public_dns_domain",
+ network_create_public_dns_domain)
+ self.stubs.Set(network.api.API, "create_private_dns_domain",
+ network_create_private_dns_domain)
+
+ self.context = context.get_admin_context()
+
+ self._create_floating_ip()
+ temp = self.floating_ip_dns.FloatingIPDNSDomainController()
+ self.domain_controller = temp
+ self.entry_controller = self.floating_ip_dns.\
+ FloatingIPDNSEntryController()
+
+ def tearDown(self):
+ self._delete_floating_ip()
+ super(FloatingIpDNSTestV21, self).tearDown()
+
+ def test_dns_domains_list(self):
+ req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns')
+ res_dict = self.domain_controller.index(req)
+ entries = res_dict['domain_entries']
+ self.assertTrue(entries)
+ self.assertEqual(entries[0]['domain'], "example.org")
+ self.assertFalse(entries[0]['project'])
+ self.assertFalse(entries[0]['availability_zone'])
+ self.assertEqual(entries[1]['domain'], "example.com")
+ self.assertEqual(entries[1]['project'], "project1")
+ self.assertFalse(entries[1]['availability_zone'])
+ self.assertEqual(entries[2]['domain'], "private.example.com")
+ self.assertFalse(entries[2]['project'])
+ self.assertEqual(entries[2]['availability_zone'], "avzone")
+
+ def _test_get_dns_entries_by_address(self, address):
+
+ qparams = {'ip': address}
+ params = "?%s" % urllib.urlencode(qparams) if qparams else ""
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s'
+ % (_quote_domain(domain), params))
+ entries = self.entry_controller.show(req, _quote_domain(domain),
+ address)
+ entries = entries.obj
+ self.assertEqual(len(entries['dns_entries']), 2)
+ self.assertEqual(entries['dns_entries'][0]['name'],
+ name)
+ self.assertEqual(entries['dns_entries'][1]['name'],
+ name2)
+ self.assertEqual(entries['dns_entries'][0]['domain'],
+ domain)
+
+ def test_get_dns_entries_by_ipv4_address(self):
+ self._test_get_dns_entries_by_address(test_ipv4_address)
+
+ def test_get_dns_entries_by_ipv6_address(self):
+ self._test_get_dns_entries_by_address(test_ipv6_address)
+
+ def test_get_dns_entries_by_name(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), name))
+ entry = self.entry_controller.show(req, _quote_domain(domain), name)
+
+ self.assertEqual(entry['dns_entry']['ip'],
+ test_ipv4_address)
+ self.assertEqual(entry['dns_entry']['domain'],
+ domain)
+
+ def test_dns_entries_not_found(self):
+ def fake_get_dns_entries_by_name(self, context, address, domain):
+ raise webob.exc.HTTPNotFound()
+
+ self.stubs.Set(network.api.API, "get_dns_entries_by_name",
+ fake_get_dns_entries_by_name)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), 'nonexistent'))
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.entry_controller.show,
+ req, _quote_domain(domain), 'nonexistent')
+
+ def test_create_entry(self):
+ body = {'dns_entry':
+ {'ip': test_ipv4_address,
+ 'dns_type': 'A'}}
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), name))
+ entry = self.entry_controller.update(req, _quote_domain(domain),
+ name, body=body)
+ self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address)
+
+ def test_create_domain(self):
+ req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
+ _quote_domain(domain))
+ body = {'domain_entry':
+ {'scope': 'private',
+ 'project': 'testproject'}}
+ self.assertRaises(self._bad_request(),
+ self.domain_controller.update,
+ req, _quote_domain(domain), body=body)
+
+ body = {'domain_entry':
+ {'scope': 'public',
+ 'availability_zone': 'zone1'}}
+ self.assertRaises(self._bad_request(),
+ self.domain_controller.update,
+ req, _quote_domain(domain), body=body)
+
+ body = {'domain_entry':
+ {'scope': 'public',
+ 'project': 'testproject'}}
+ entry = self.domain_controller.update(req, _quote_domain(domain),
+ body=body)
+ self.assertEqual(entry['domain_entry']['domain'], domain)
+ self.assertEqual(entry['domain_entry']['scope'], 'public')
+ self.assertEqual(entry['domain_entry']['project'], 'testproject')
+
+ body = {'domain_entry':
+ {'scope': 'private',
+ 'availability_zone': 'zone1'}}
+ entry = self.domain_controller.update(req, _quote_domain(domain),
+ body=body)
+ self.assertEqual(entry['domain_entry']['domain'], domain)
+ self.assertEqual(entry['domain_entry']['scope'], 'private')
+ self.assertEqual(entry['domain_entry']['availability_zone'], 'zone1')
+
+ def test_delete_entry(self):
+ calls = []
+
+ def network_delete_dns_entry(fakeself, context, name, domain):
+ calls.append((name, domain))
+
+ self.stubs.Set(network.api.API, "delete_dns_entry",
+ network_delete_dns_entry)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), name))
+ res = self.entry_controller.delete(req, _quote_domain(domain), name)
+
+ self._check_status(202, res, self.entry_controller.delete)
+ self.assertEqual([(name, domain)], calls)
+
+ def test_delete_entry_notfound(self):
+ def delete_dns_entry_notfound(fakeself, context, name, domain):
+ raise exception.NotFound
+
+ self.stubs.Set(network.api.API, "delete_dns_entry",
+ delete_dns_entry_notfound)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), name))
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.entry_controller.delete, req, _quote_domain(domain), name)
+
+ def test_delete_domain(self):
+ calls = []
+
+ def network_delete_dns_domain(fakeself, context, fqdomain):
+ calls.append(fqdomain)
+
+ self.stubs.Set(network.api.API, "delete_dns_domain",
+ network_delete_dns_domain)
+
+ req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
+ _quote_domain(domain))
+ res = self.domain_controller.delete(req, _quote_domain(domain))
+
+ self._check_status(202, res, self.domain_controller.delete)
+ self.assertEqual([domain], calls)
+
+ def test_delete_domain_notfound(self):
+ def delete_dns_domain_notfound(fakeself, context, fqdomain):
+ raise exception.NotFound
+
+ self.stubs.Set(network.api.API, "delete_dns_domain",
+ delete_dns_domain_notfound)
+
+ req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
+ _quote_domain(domain))
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.domain_controller.delete, req, _quote_domain(domain))
+
+ def test_modify(self):
+ body = {'dns_entry':
+ {'ip': test_ipv4_address2,
+ 'dns_type': 'A'}}
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' % (domain, name))
+ entry = self.entry_controller.update(req, domain, name, body=body)
+
+ self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2)
+
+
+class FloatingIpDNSTestV2(FloatingIpDNSTestV21):
+ floating_ip_dns = fipdns_v2
+
+    def _check_status(self, expected_status, res, controller_method):
+ self.assertEqual(expected_status, res.status_int)
+
+ def _bad_request(self):
+ return webob.exc.HTTPUnprocessableEntity
+
+
+class FloatingIpDNSSerializerTestV2(test.TestCase):
+ floating_ip_dns = fipdns_v2
+
+ def test_domains(self):
+ serializer = self.floating_ip_dns.DomainsTemplate()
+ text = serializer.serialize(dict(
+ domain_entries=[
+ dict(domain=domain, scope='public', project='testproject'),
+ dict(domain=domain2, scope='private',
+ availability_zone='avzone')]))
+
+ tree = etree.fromstring(text)
+ self.assertEqual('domain_entries', tree.tag)
+ self.assertEqual(2, len(tree))
+ self.assertEqual(domain, tree[0].get('domain'))
+ self.assertEqual(domain2, tree[1].get('domain'))
+ self.assertEqual('avzone', tree[1].get('availability_zone'))
+
+ def test_domain_serializer(self):
+ serializer = self.floating_ip_dns.DomainTemplate()
+ text = serializer.serialize(dict(
+ domain_entry=dict(domain=domain,
+ scope='public',
+ project='testproject')))
+
+ tree = etree.fromstring(text)
+ self.assertEqual('domain_entry', tree.tag)
+ self.assertEqual(domain, tree.get('domain'))
+ self.assertEqual('testproject', tree.get('project'))
+
+ def test_entries_serializer(self):
+ serializer = self.floating_ip_dns.FloatingIPDNSsTemplate()
+ text = serializer.serialize(dict(
+ dns_entries=[
+ dict(ip=test_ipv4_address,
+ type='A',
+ domain=domain,
+ name=name),
+ dict(ip=test_ipv4_address2,
+ type='C',
+ domain=domain,
+ name=name2)]))
+
+ tree = etree.fromstring(text)
+ self.assertEqual('dns_entries', tree.tag)
+ self.assertEqual(2, len(tree))
+ self.assertEqual('dns_entry', tree[0].tag)
+ self.assertEqual('dns_entry', tree[1].tag)
+ self.assertEqual(test_ipv4_address, tree[0].get('ip'))
+ self.assertEqual('A', tree[0].get('type'))
+ self.assertEqual(domain, tree[0].get('domain'))
+ self.assertEqual(name, tree[0].get('name'))
+ self.assertEqual(test_ipv4_address2, tree[1].get('ip'))
+ self.assertEqual('C', tree[1].get('type'))
+ self.assertEqual(domain, tree[1].get('domain'))
+ self.assertEqual(name2, tree[1].get('name'))
+
+ def test_entry_serializer(self):
+ serializer = self.floating_ip_dns.FloatingIPDNSTemplate()
+ text = serializer.serialize(dict(
+ dns_entry=dict(
+ ip=test_ipv4_address,
+ type='A',
+ domain=domain,
+ name=name)))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('dns_entry', tree.tag)
+ self.assertEqual(test_ipv4_address, tree.get('ip'))
+ self.assertEqual(domain, tree.get('domain'))
+ self.assertEqual(name, tree.get('name'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py
new file mode 100644
index 0000000000..926e88c6ae
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack.compute.contrib import floating_ip_pools as fipp_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ip_pools as\
+ fipp_v21
+from nova import context
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get_floating_ip_pools(self, context):
+ return ['nova', 'other']
+
+
+class FloatingIpPoolTestV21(test.NoDBTestCase):
+ floating_ip_pools = fipp_v21
+ url = '/v2/fake/os-floating-ip-pools'
+
+ def setUp(self):
+ super(FloatingIpPoolTestV21, self).setUp()
+ self.stubs.Set(network.api.API, "get_floating_ip_pools",
+ fake_get_floating_ip_pools)
+
+ self.context = context.RequestContext('fake', 'fake')
+ self.controller = self.floating_ip_pools.FloatingIPPoolsController()
+
+ def test_translate_floating_ip_pools_view(self):
+ pools = fake_get_floating_ip_pools(None, self.context)
+ view = self.floating_ip_pools._translate_floating_ip_pools_view(pools)
+ self.assertIn('floating_ip_pools', view)
+ self.assertEqual(view['floating_ip_pools'][0]['name'],
+ pools[0])
+ self.assertEqual(view['floating_ip_pools'][1]['name'],
+ pools[1])
+
+ def test_floating_ips_pools_list(self):
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req)
+
+ pools = fake_get_floating_ip_pools(None, self.context)
+ response = {'floating_ip_pools': [{'name': name} for name in pools]}
+ self.assertEqual(res_dict, response)
+
+
+class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
+ floating_ip_pools = fipp_v2
+
+
+class FloatingIpPoolSerializerTestV2(test.NoDBTestCase):
+ floating_ip_pools = fipp_v2
+
+ def test_index_serializer(self):
+ serializer = self.floating_ip_pools.FloatingIPPoolsTemplate()
+ text = serializer.serialize(dict(
+ floating_ip_pools=[
+ dict(name='nova'),
+ dict(name='other')
+ ]))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('floating_ip_pools', tree.tag)
+ self.assertEqual(2, len(tree))
+ self.assertEqual('floating_ip_pool', tree[0].tag)
+ self.assertEqual('floating_ip_pool', tree[1].tag)
+ self.assertEqual('nova', tree[0].get('name'))
+ self.assertEqual('other', tree[1].get('name'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py
new file mode 100644
index 0000000000..b383d1dbc1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py
@@ -0,0 +1,853 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ips
+from nova.api.openstack import extensions
+from nova import compute
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+def network_api_get_floating_ip(self, context, id):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': None}
+
+
+def network_api_get_floating_ip_by_address(self, context, address):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': 10}
+
+
+def network_api_get_floating_ips_by_project(self, context):
+ return [{'id': 1,
+ 'address': '10.10.10.10',
+ 'pool': 'nova',
+ 'fixed_ip': {'address': '10.0.0.1',
+ 'instance_uuid': FAKE_UUID,
+ 'instance': {'uuid': FAKE_UUID}}},
+ {'id': 2,
+ 'pool': 'nova', 'interface': 'eth0',
+ 'address': '10.10.10.11',
+ 'fixed_ip': None}]
+
+
+def compute_api_get(self, context, instance_id, expected_attrs=None,
+ want_objects=False):
+ return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
+
+
+def network_api_allocate(self, context):
+ return '10.10.10.10'
+
+
+def network_api_release(self, context, address):
+ pass
+
+
+def compute_api_associate(self, context, instance_id, address):
+ pass
+
+
+def network_api_associate(self, context, floating_address, fixed_address):
+ pass
+
+
+def network_api_disassociate(self, context, instance, floating_address):
+ pass
+
+
+def fake_instance_get(context, instance_id):
+ return {
+ "id": 1,
+ "uuid": uuid.uuid4(),
+ "name": 'fake',
+ "user_id": 'fakeuser',
+ "project_id": '123'}
+
+
+def stub_nw_info(stubs):
+ def get_nw_info_for_instance(instance):
+ return fake_network.fake_get_instance_nw_info(stubs)
+ return get_nw_info_for_instance
+
+
+def get_instance_by_floating_ip_addr(self, context, address):
+ return None
+
+
+class FloatingIpTestNeutron(test.NoDBTestCase):
+
+ def setUp(self):
+ super(FloatingIpTestNeutron, self).setUp()
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ self.controller = floating_ips.FloatingIPController()
+
+ def _get_fake_request(self):
+ return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
+
+ def test_floatingip_delete(self):
+ req = self._get_fake_request()
+ fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
+ with contextlib.nested(
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_and_release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'get_instance_id_by_floating_address',
+ return_value=None),
+ mock.patch.object(self.controller.network_api,
+ 'get_floating_ip',
+ return_value=fip_val)) as (
+ disoc_fip, dis_and_del, rel_fip, _, _):
+ self.controller.delete(req, 1)
+ self.assertFalse(disoc_fip.called)
+ self.assertFalse(rel_fip.called)
+ # Only disassociate_and_release_floating_ip is
+ # called if using neutron
+ self.assertTrue(dis_and_del.called)
+
+
+class FloatingIpTest(test.TestCase):
+ floating_ip = "10.10.10.10"
+ floating_ip_2 = "10.10.10.11"
+
+ def _create_floating_ips(self, floating_ips=None):
+ """Create a floating ip object."""
+ if floating_ips is None:
+ floating_ips = [self.floating_ip]
+ elif not isinstance(floating_ips, (list, tuple)):
+ floating_ips = [floating_ips]
+
+        def make_ip_dict(ip):
+            """Shortcut for creating a floating ip dict."""
+            return dict(address=ip, pool='nova', host='fake_host')
+
+        return db.floating_ip_bulk_create(
+            self.context, [make_ip_dict(ip) for ip in floating_ips])
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, self.floating_ip)
+
+ def _get_fake_fip_request(self, act=''):
+ return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/%s' % act)
+
+ def _get_fake_server_request(self):
+ return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+
+ def _get_fake_response(self, req, init_only):
+ return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
+
+ def setUp(self):
+ super(FloatingIpTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ network_api_get_floating_ip_by_address)
+ self.stubs.Set(network.api.API, "get_floating_ips_by_project",
+ network_api_get_floating_ips_by_project)
+ self.stubs.Set(network.api.API, "release_floating_ip",
+ network_api_release)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ stub_nw_info(self.stubs))
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.stubs.Set(db, 'instance_get',
+ fake_instance_get)
+
+ self.context = context.get_admin_context()
+ self._create_floating_ips()
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = floating_ips.FloatingIPController()
+ self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
+
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Floating_ips'])
+
+ def tearDown(self):
+ self._delete_floating_ip()
+ super(FloatingIpTest, self).tearDown()
+
+ def test_floatingip_delete(self):
+ req = self._get_fake_fip_request('1')
+ fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
+ with contextlib.nested(
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'get_instance_id_by_floating_address',
+ return_value=None),
+ mock.patch.object(self.controller.network_api,
+ 'get_floating_ip',
+ return_value=fip_val)) as (
+ disoc_fip, rel_fip, _, _):
+ self.controller.delete(req, 1)
+ self.assertTrue(disoc_fip.called)
+ self.assertTrue(rel_fip.called)
+
+ def test_translate_floating_ip_view(self):
+ floating_ip_address = self.floating_ip
+ floating_ip = db.floating_ip_get_by_address(self.context,
+ floating_ip_address)
+ # NOTE(vish): network_get uses the id not the address
+ floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
+ view = floating_ips._translate_floating_ip_view(floating_ip)
+ self.assertIn('floating_ip', view)
+ self.assertTrue(view['floating_ip']['id'])
+ self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
+ self.assertIsNone(view['floating_ip']['fixed_ip'])
+ self.assertIsNone(view['floating_ip']['instance_id'])
+
+ def test_translate_floating_ip_view_dict(self):
+ floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
+ 'fixed_ip': None}
+ view = floating_ips._translate_floating_ip_view(floating_ip)
+ self.assertIn('floating_ip', view)
+
+ def test_floating_ips_list(self):
+ req = self._get_fake_fip_request()
+ res_dict = self.controller.index(req)
+
+ response = {'floating_ips': [{'instance_id': FAKE_UUID,
+ 'ip': '10.10.10.10',
+ 'pool': 'nova',
+ 'fixed_ip': '10.0.0.1',
+ 'id': 1},
+ {'instance_id': None,
+ 'ip': '10.10.10.11',
+ 'pool': 'nova',
+ 'fixed_ip': None,
+ 'id': 2}]}
+ self.assertEqual(res_dict, response)
+
+ def test_floating_ip_release_nonexisting(self):
+ def fake_get_floating_ip(*args, **kwargs):
+ raise exception.FloatingIpNotFound(id=id)
+
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ fake_get_floating_ip)
+
+ req = self._get_fake_fip_request('9876')
+ req.method = 'DELETE'
+ res = self._get_fake_response(req, 'os-floating-ips')
+ self.assertEqual(res.status_int, 404)
+ expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
+ 'for id 9876", "code": 404}}')
+ self.assertEqual(res.body, expected_msg)
+
+ def test_floating_ip_release_race_cond(self):
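+        # Simulate the floating IP being disassociated by another request
+        # between the lookup and the disassociate call; the delete should
+        # still return 202.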
+ def fake_get_floating_ip(*args, **kwargs):
+ return {'fixed_ip_id': 1, 'address': self.floating_ip}
+
+ def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
+ return 'test-inst'
+
+ def fake_disassociate_floating_ip(*args, **kwargs):
+ raise exception.FloatingIpNotAssociated(args[3])
+
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ fake_get_floating_ip)
+ self.stubs.Set(floating_ips, "get_instance_by_floating_ip_addr",
+ fake_get_instance_by_floating_ip_addr)
+ self.stubs.Set(floating_ips, "disassociate_floating_ip",
+ fake_disassociate_floating_ip)
+
+ req = self._get_fake_fip_request('1')
+ req.method = 'DELETE'
+ res = self._get_fake_response(req, 'os-floating-ips')
+ self.assertEqual(res.status_int, 202)
+
+ def test_floating_ip_show(self):
+ req = self._get_fake_fip_request('1')
+ res_dict = self.controller.show(req, 1)
+
+ self.assertEqual(res_dict['floating_ip']['id'], 1)
+ self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertIsNone(res_dict['floating_ip']['instance_id'])
+
+ def test_floating_ip_show_not_found(self):
+ def fake_get_floating_ip(*args, **kwargs):
+ raise exception.FloatingIpNotFound(id='fake')
+
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ fake_get_floating_ip)
+
+ req = self._get_fake_fip_request('9876')
+ res = self._get_fake_response(req, 'os-floating-ips')
+ self.assertEqual(res.status_int, 404)
+ expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
+ 'for id 9876", "code": 404}}')
+ self.assertEqual(res.body, expected_msg)
+
+ def test_show_associated_floating_ip(self):
+ def get_floating_ip(self, context, id):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip': {'address': '10.0.0.1',
+ 'instance_uuid': FAKE_UUID,
+ 'instance': {'uuid': FAKE_UUID}}}
+
+ self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
+
+ req = self._get_fake_fip_request('1')
+ res_dict = self.controller.show(req, 1)
+
+ self.assertEqual(res_dict['floating_ip']['id'], 1)
+ self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
+ self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
+
+ def test_recreation_of_floating_ip(self):
+ self._delete_floating_ip()
+ self._create_floating_ips()
+
+ def test_floating_ip_in_bulk_creation(self):
+ self._delete_floating_ip()
+
+ self._create_floating_ips([self.floating_ip, self.floating_ip_2])
+ all_ips = db.floating_ip_get_all(self.context)
+ ip_list = [ip['address'] for ip in all_ips]
+ self.assertIn(self.floating_ip, ip_list)
+ self.assertIn(self.floating_ip_2, ip_list)
+
+ def test_fail_floating_ip_in_bulk_creation(self):
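+        # self.floating_ip already exists from setUp(), so the bulk create
+        # must fail and must not create self.floating_ip_2 either.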
+ self.assertRaises(exception.FloatingIpExists,
+ self._create_floating_ips,
+ [self.floating_ip, self.floating_ip_2])
+ all_ips = db.floating_ip_get_all(self.context)
+ ip_list = [ip['address'] for ip in all_ips]
+ self.assertIn(self.floating_ip, ip_list)
+ self.assertNotIn(self.floating_ip_2, ip_list)
+
+ def test_floating_ip_allocate_no_free_ips(self):
+ def fake_allocate(*args, **kwargs):
+ raise exception.NoMoreFloatingIps()
+
+ self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
+
+ req = self._get_fake_fip_request()
+ ex = self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req)
+
+ self.assertIn('No more floating ips', ex.explanation)
+
+ def test_floating_ip_allocate_no_free_ips_pool(self):
+ def fake_allocate(*args, **kwargs):
+ raise exception.NoMoreFloatingIps()
+
+ self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
+
+ req = self._get_fake_fip_request()
+ ex = self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req, {'pool': 'non_existent_pool'})
+
+ self.assertIn('No more floating ips in pool non_existent_pool',
+ ex.explanation)
+
+ @mock.patch('nova.network.api.API.allocate_floating_ip',
+ side_effect=exception.FloatingIpLimitExceeded())
+ def test_floating_ip_allocate_over_quota(self, allocate_mock):
+ req = self._get_fake_fip_request()
+ ex = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, req)
+
+ self.assertIn('IP allocation over quota', ex.explanation)
+
+ @mock.patch('nova.network.api.API.allocate_floating_ip',
+ side_effect=exception.FloatingIpLimitExceeded())
+ def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
+ req = self._get_fake_fip_request()
+ ex = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, req, {'pool': 'non_existent_pool'})
+
+ self.assertIn('IP allocation over quota in pool non_existent_pool.',
+ ex.explanation)
+
+ @mock.patch('nova.network.api.API.allocate_floating_ip',
+ side_effect=exception.FloatingIpPoolNotFound())
+ def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
+ req = self._get_fake_fip_request()
+ ex = self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req, {'pool': 'non_existent_pool'})
+
+ self.assertIn('Floating ip pool not found.', ex.explanation)
+
+ def test_floating_ip_allocate(self):
+ def fake1(*args, **kwargs):
+ pass
+
+ def fake2(*args, **kwargs):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
+
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ fake1)
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ fake2)
+
+ req = self._get_fake_fip_request()
+ res_dict = self.controller.create(req)
+
+ ip = res_dict['floating_ip']
+
+ expected = {
+ "id": 1,
+ "instance_id": None,
+ "ip": "10.10.10.10",
+ "fixed_ip": None,
+ "pool": 'nova'}
+ self.assertEqual(ip, expected)
+
+ def test_floating_ip_release(self):
+ req = self._get_fake_fip_request('1')
+ self.controller.delete(req, 1)
+
+ def test_floating_ip_associate(self):
+ fixed_address = '192.168.1.100'
+
+ def fake_associate_floating_ip(*args, **kwargs):
+ self.assertEqual(fixed_address, kwargs['fixed_address'])
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip))
+
+ req = self._get_fake_server_request()
+ rsp = self.manager._add_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_floating_ip_associate_invalid_instance(self):
+
+ def fake_get(self, context, id, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=id)
+
+ self.stubs.Set(compute.api.API, "get", fake_get)
+
+ body = dict(addFloatingIp=dict(address=self.floating_ip))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_not_extended_floating_ip_associate_fixed(self):
+ # Check that fixed_address is ignored if os-extended-floating-ips
+ # is not loaded
+ fixed_address_requested = '192.168.1.101'
+ fixed_address_allocated = '192.168.1.100'
+
+ def fake_associate_floating_ip(*args, **kwargs):
+ self.assertEqual(fixed_address_allocated,
+ kwargs['fixed_address'])
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address=fixed_address_requested))
+
+ req = self._get_fake_server_request()
+ rsp = self.manager._add_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_associate_not_allocated_floating_ip_to_instance(self):
+ def fake_associate_floating_ip(self, context, instance,
+ floating_address, fixed_address,
+ affect_auto_assigned=False):
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ floating_ip = '10.10.10.11'
+ body = dict(addFloatingIp=dict(address=floating_ip))
+ req = self._get_fake_server_request()
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = self._get_fake_response(req, 'servers')
+ res_dict = jsonutils.loads(resp.body)
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(res_dict['itemNotFound']['message'],
+ "floating ip not found")
+
+ @mock.patch.object(network.api.API, 'associate_floating_ip',
+ side_effect=exception.Forbidden)
+ def test_associate_floating_ip_forbidden(self, associate_mock):
+ body = dict(addFloatingIp=dict(address='10.10.10.11'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_associate_floating_ip_bad_address_key(self):
+ body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_associate_floating_ip_bad_addfloatingip_key(self):
+ body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_floating_ip_disassociate(self):
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ rsp = self.manager._remove_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_floating_ip_disassociate_missing(self):
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_associate_non_existent_ip(self):
+ def fake_network_api_associate(self, context, instance,
+ floating_address=None,
+ fixed_address=None):
+ floating_ips = ["10.10.10.10", "10.10.10.11"]
+ if floating_address not in floating_ips:
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_network_api_associate)
+
+ body = dict(addFloatingIp=dict(address='1.1.1.1'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._add_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_non_existent_ip(self):
+ def network_api_get_floating_ip_by_address(self, context,
+ floating_address):
+ floating_ips = ["10.10.10.10", "10.10.10.11"]
+ if floating_address not in floating_ips:
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ network_api_get_floating_ip_by_address)
+
+ body = dict(removeFloatingIp=dict(address='1.1.1.1'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_wrong_instance_uuid(self):
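+        # The floating IP is associated with 'test_inst'; removing it from
+        # a different instance UUID must be rejected.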
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, wrong_uuid, body)
+
+ def test_floating_ip_disassociate_wrong_instance_id(self):
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'wrong_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_auto_assigned(self):
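+        # Auto-assigned floating IPs cannot be disassociated through the
+        # API; the request must be rejected with 403.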
+ def fake_get_floating_ip_addr_auto_assigned(self, context, address):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': 10, 'auto_assigned': 1}
+
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ def network_api_disassociate(self, context, instance,
+ floating_address):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ fake_get_floating_ip_addr_auto_assigned)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_map_authorization_exc(self):
+ def fake_get_floating_ip_addr_auto_assigned(self, context, address):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': 10, 'auto_assigned': 1}
+
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ def network_api_disassociate(self, context, instance, address):
+ raise exception.Forbidden()
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ fake_get_floating_ip_addr_auto_assigned)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+    # The following tests exercise malformed request parameters.
+
+ def test_bad_address_param_in_remove_floating_ip(self):
+ body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._remove_floating_ip, req, 'test_inst',
+ body)
+
+ def test_missing_dict_param_in_remove_floating_ip(self):
+ body = dict(removeFloatingIp='11.0.0.1')
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._remove_floating_ip, req, 'test_inst',
+ body)
+
+ def test_missing_dict_param_in_add_floating_ip(self):
+ body = dict(addFloatingIp='11.0.0.1')
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+
+class ExtendedFloatingIpTest(test.TestCase):
+ floating_ip = "10.10.10.10"
+ floating_ip_2 = "10.10.10.11"
+
+ def _create_floating_ips(self, floating_ips=None):
+ """Create a floating ip object."""
+ if floating_ips is None:
+ floating_ips = [self.floating_ip]
+ elif not isinstance(floating_ips, (list, tuple)):
+ floating_ips = [floating_ips]
+
+        def make_ip_dict(ip):
+            """Shortcut for creating a floating ip dict."""
+            return dict(address=ip, pool='nova', host='fake_host')
+
+        return db.floating_ip_bulk_create(
+            self.context, [make_ip_dict(ip) for ip in floating_ips])
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, self.floating_ip)
+
+ def _get_fake_request(self):
+ return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+
+ def _get_fake_response(self, req, init_only):
+ return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
+
+ def setUp(self):
+ super(ExtendedFloatingIpTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ network_api_get_floating_ip_by_address)
+ self.stubs.Set(network.api.API, "get_floating_ips_by_project",
+ network_api_get_floating_ips_by_project)
+ self.stubs.Set(network.api.API, "release_floating_ip",
+ network_api_release)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ stub_nw_info(self.stubs))
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.stubs.Set(db, 'instance_get',
+ fake_instance_get)
+
+ self.context = context.get_admin_context()
+ self._create_floating_ips()
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.ext_mgr.extensions['os-floating-ips'] = True
+ self.ext_mgr.extensions['os-extended-floating-ips'] = True
+ self.controller = floating_ips.FloatingIPController()
+ self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
+
+ def tearDown(self):
+ self._delete_floating_ip()
+ super(ExtendedFloatingIpTest, self).tearDown()
+
+ def test_extended_floating_ip_associate_fixed(self):
+ fixed_address = '192.168.1.101'
+
+ def fake_associate_floating_ip(*args, **kwargs):
+ self.assertEqual(fixed_address, kwargs['fixed_address'])
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address=fixed_address))
+
+ req = self._get_fake_request()
+ rsp = self.manager._add_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_extended_floating_ip_associate_fixed_not_allocated(self):
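+        # The requested fixed address is not assigned to the instance, so
+        # the association must fail with a 400 response.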
+ def fake_associate_floating_ip(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address='11.11.11.11'))
+
+ req = self._get_fake_request()
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = self._get_fake_response(req, 'servers')
+ res_dict = jsonutils.loads(resp.body)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ "Specified fixed address not assigned to instance")
+
+
+class FloatingIpSerializerTest(test.TestCase):
+ def test_default_serializer(self):
+ serializer = floating_ips.FloatingIPTemplate()
+ text = serializer.serialize(dict(
+ floating_ip=dict(
+ instance_id=1,
+ ip='10.10.10.10',
+ fixed_ip='10.0.0.1',
+ id=1)))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('floating_ip', tree.tag)
+ self.assertEqual('1', tree.get('instance_id'))
+ self.assertEqual('10.10.10.10', tree.get('ip'))
+ self.assertEqual('10.0.0.1', tree.get('fixed_ip'))
+ self.assertEqual('1', tree.get('id'))
+
+ def test_index_serializer(self):
+ serializer = floating_ips.FloatingIPsTemplate()
+ text = serializer.serialize(dict(
+ floating_ips=[
+ dict(instance_id=1,
+ ip='10.10.10.10',
+ fixed_ip='10.0.0.1',
+ id=1),
+ dict(instance_id=None,
+ ip='10.10.10.11',
+ fixed_ip=None,
+ id=2)]))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('floating_ips', tree.tag)
+ self.assertEqual(2, len(tree))
+ self.assertEqual('floating_ip', tree[0].tag)
+ self.assertEqual('floating_ip', tree[1].tag)
+ self.assertEqual('1', tree[0].get('instance_id'))
+ self.assertEqual('None', tree[1].get('instance_id'))
+ self.assertEqual('10.10.10.10', tree[0].get('ip'))
+ self.assertEqual('10.10.10.11', tree[1].get('ip'))
+ self.assertEqual('10.0.0.1', tree[0].get('fixed_ip'))
+ self.assertEqual('None', tree[1].get('fixed_ip'))
+ self.assertEqual('1', tree[0].get('id'))
+ self.assertEqual('2', tree[1].get('id'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py
new file mode 100644
index 0000000000..8c81d99ab0
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py
@@ -0,0 +1,139 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ips_bulk as fipbulk_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ips_bulk as\
+ fipbulk_v21
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+
+
+class FloatingIPBulkV21(test.TestCase):
+
+ floating_ips_bulk = fipbulk_v21
+ url = '/v2/fake/os-floating-ips-bulk'
+    delete_url = '/v2/fake/os-floating-ips-bulk/delete'
+ bad_request = exception.ValidationError
+
+ def setUp(self):
+ super(FloatingIPBulkV21, self).setUp()
+
+ self.context = context.get_admin_context()
+ self.controller = self.floating_ips_bulk.FloatingIPBulkController()
+
+ def _setup_floating_ips(self, ip_range):
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_create_ips(self):
+ ip_range = '192.168.1.0/24'
+ self._setup_floating_ips(ip_range)
+
+ def test_create_ips_pool(self):
+ ip_range = '10.0.1.0/20'
+ pool = 'a new pool'
+ body = {'floating_ips_bulk_create':
+ {'ip_range': ip_range,
+ 'pool': pool}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_list_ips(self):
+ ip_range = '192.168.1.1/28'
+ self._setup_floating_ips(ip_range)
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ res_dict = self.controller.index(req)
+
+ ip_info = [{'address': str(ip_addr),
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface,
+ 'project_id': None,
+ 'instance_uuid': None}
+ for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
+ response = {'floating_ip_info': ip_info}
+
+ self.assertEqual(res_dict, response)
+
+ def test_list_ip_by_host(self):
+ ip_range = '192.168.1.1/28'
+ self._setup_floating_ips(ip_range)
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 'host')
+
+ def test_delete_ips(self):
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ body = {'ip_range': ip_range}
+ req = fakes.HTTPRequest.blank(self.delete_url)
+ res_dict = self.controller.update(req, "delete", body=body)
+
+ response = {"floating_ips_bulk_delete": ip_range}
+ self.assertEqual(res_dict, response)
+
+ # Check that the IPs are actually deleted
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ res_dict = self.controller.index(req)
+ response = {'floating_ip_info': []}
+ self.assertEqual(res_dict, response)
+
+ def test_create_duplicate_fail(self):
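+        # The second range overlaps the one created above, so the request
+        # must be rejected.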
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ ip_range = '192.168.1.0/28'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body=body)
+
+ def test_create_bad_cidr_fail(self):
+        # netaddr can't handle /32 or /31 CIDRs
+ ip_range = '192.168.1.1/32'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body=body)
+
+ def test_create_invalid_cidr_fail(self):
+ ip_range = 'not a cidr'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, body=body)
+
+
+class FloatingIPBulkV2(FloatingIPBulkV21):
+ floating_ips_bulk = fipbulk_v2
+ bad_request = webob.exc.HTTPBadRequest
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_fping.py b/nova/tests/unit/api/openstack/compute/contrib/test_fping.py
new file mode 100644
index 0000000000..a6364d6ee7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_fping.py
@@ -0,0 +1,106 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack.compute.plugins.v3 import fping as fping_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.utils
+
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def execute(*cmd, **args):
+ return "".join(["%s is alive" % ip for ip in cmd[1:]])
+
+
+class FpingTestV21(test.TestCase):
+ controller_cls = fping_v21.FpingController
+
+ def setUp(self):
+ super(FpingTestV21, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(nova.db, "instance_get_all_by_filters",
+ return_servers)
+ self.stubs.Set(nova.db, "instance_get_by_uuid",
+ return_server)
+ self.stubs.Set(nova.utils, "execute",
+ execute)
+ self.stubs.Set(self.controller_cls, "check_fping",
+ lambda self: None)
+ self.controller = self.controller_cls()
+
+ def _get_url(self):
+ return "/v3"
+
+ def test_fping_index(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ self.assertIn("servers", res_dict)
+ for srv in res_dict["servers"]:
+ for key in "project_id", "id", "alive":
+ self.assertIn(key, srv)
+
+ def test_fping_index_policy(self):
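+        # Listing across all tenants requires an admin context.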
+        req = fakes.HTTPRequest.blank(self._get_url() +
+                                      "/os-fping?all_tenants=1")
+ self.assertRaises(exception.Forbidden, self.controller.index, req)
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?all_tenants=1")
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ self.assertIn("servers", res_dict)
+
+ def test_fping_index_include(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?include=%s" % ids[0])
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_index_exclude(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?exclude=%s" %
+ ",".join(ids[1:]))
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_show(self):
+        req = fakes.HTTPRequest.blank(self._get_url() +
+                                      "/os-fping/%s" % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertIn("server", res_dict)
+ srv = res_dict["server"]
+ for key in "project_id", "id", "alive":
+ self.assertIn(key, srv)
+
+
+class FpingTestV2(FpingTestV21):
+ controller_cls = fping.FpingController
+
+ def _get_url(self):
+ return "/v2/1234"
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py
new file mode 100644
index 0000000000..217fd480f9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py
@@ -0,0 +1,172 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack import wsgi
+from nova import compute
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+SENTINEL = object()
+
+
+def fake_compute_get(*args, **kwargs):
+ def _return_server(*_args, **_kwargs):
+ inst = fakes.stub_instance(*args, **kwargs)
+ return fake_instance.fake_instance_obj(_args[1], **inst)
+ return _return_server
+
+
+class HideServerAddressesTestV21(test.TestCase):
+ content_type = 'application/json'
+ base_url = '/v2/fake/servers'
+
+ def _setup_wsgi(self):
+ self.wsgi_app = fakes.wsgi_app_v21(
+ init_only=('servers', 'os-hide-server-addresses'))
+
+ def setUp(self):
+ super(HideServerAddressesTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self._setup_wsgi()
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(self.wsgi_app)
+ return res
+
+ @staticmethod
+ def _get_server(body):
+ return jsonutils.loads(body).get('server')
+
+ @staticmethod
+ def _get_servers(body):
+ return jsonutils.loads(body).get('servers')
+
+ @staticmethod
+ def _get_addresses(server):
+ return server.get('addresses', SENTINEL)
+
+ def _check_addresses(self, addresses, exists):
+        self.assertIsNot(addresses, SENTINEL)
+ if exists:
+ self.assertTrue(addresses)
+ else:
+ self.assertFalse(addresses)
+
+ def test_show_hides_in_building(self):
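+        # Addresses must be hidden while the instance is still building.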
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.BUILDING))
+ res = self._make_request(self.base_url + '/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=False)
+
+ def test_show(self):
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.ACTIVE))
+ res = self._make_request(self.base_url + '/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=True)
+
+ def test_detail_hides_building_server_addresses(self):
+ instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0),
+ vm_state=vm_states.ACTIVE)
+ instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1),
+ vm_state=vm_states.BUILDING)
+ instances = [instance_0, instance_1]
+
+ def get_all(*args, **kwargs):
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(
+ args[1], objects.InstanceList(), instances, fields)
+
+ self.stubs.Set(compute.api.API, 'get_all', get_all)
+ res = self._make_request(self.base_url + '/detail')
+
+ self.assertEqual(res.status_int, 200)
+ servers = self._get_servers(res.body)
+
+ self.assertEqual(len(servers), len(instances))
+
+ for instance, server in itertools.izip(instances, servers):
+ addresses = self._get_addresses(server)
+ exists = (instance['vm_state'] == vm_states.ACTIVE)
+ self._check_addresses(addresses, exists=exists)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ res = self._make_request(self.base_url + '/' + fakes.get_fake_uuid())
+
+ self.assertEqual(res.status_int, 404)
+
+
+class HideServerAddressesTestV2(HideServerAddressesTestV21):
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Hide_server_addresses'])
+ self.wsgi_app = fakes.wsgi_app(init_only=('servers',))
+
+
+class HideAddressesXmlTest(HideServerAddressesTestV2):
+ content_type = 'application/xml'
+
+ @staticmethod
+ def _get_server(body):
+ return etree.XML(body)
+
+ @staticmethod
+ def _get_servers(body):
+ return etree.XML(body).getchildren()
+
+ @staticmethod
+ def _get_addresses(server):
+ addresses = server.find('{%s}addresses' % wsgi.XMLNS_V11)
+ if addresses is None:
+ return SENTINEL
+ return addresses
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py b/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py
new file mode 100644
index 0000000000..5478a7dd33
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py
@@ -0,0 +1,471 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import testtools
+import webob.exc
+
+from nova.api.openstack.compute.contrib import hosts as os_hosts_v2
+from nova.api.openstack.compute.plugins.v3 import hosts as os_hosts_v3
+from nova.compute import power_state
+from nova.compute import vm_states
+from nova import context as context_maker
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_hosts
+from nova.tests.unit import utils
+
+
+def stub_service_get_all(context, disabled=None):
+ return fake_hosts.SERVICES_LIST
+
+
+def stub_service_get_by_host_and_topic(context, host_name, topic):
+ for service in stub_service_get_all(context):
+ if service['host'] == host_name and service['topic'] == topic:
+ return service
+
+
+def stub_set_host_enabled(context, host_name, enabled):
+ """Simulates three possible behaviours for VM drivers or compute
+ drivers when enabling or disabling a host.
+
+ 'enabled' means new instances can go to this host
+ 'disabled' means they can't
+ """
+ results = {True: "enabled", False: "disabled"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "dummydest":
+ # The host does not exist
+ raise exception.ComputeHostNotFound(host=host_name)
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not enabled]
+ else:
+ # Do the right thing
+ return results[enabled]
+
+
+def stub_set_host_maintenance(context, host_name, mode):
+ # We'll simulate success and failure by assuming
+ # that 'host_c1' always succeeds, and 'host_c2'
+ # always fails
+ results = {True: "on_maintenance", False: "off_maintenance"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "dummydest":
+ # The host does not exist
+ raise exception.ComputeHostNotFound(host=host_name)
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not mode]
+ else:
+ # Do the right thing
+ return results[mode]
+
+
+def stub_host_power_action(context, host_name, action):
+ if host_name == "notimplemented":
+ raise NotImplementedError()
+ elif host_name == "dummydest":
+ # The host does not exist
+ raise exception.ComputeHostNotFound(host=host_name)
+ return action
+
+
+def _create_instance(**kwargs):
+ """Create a test instance."""
+ ctxt = context_maker.get_admin_context()
+ return db.instance_create(ctxt, _create_instance_dict(**kwargs))
+
+
+def _create_instance_dict(**kwargs):
+ """Create a dictionary for a test instance."""
+ inst = {}
+ inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = kwargs.get('user_id', 'admin')
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['instance_type_id'] = '1'
+ if 'host' in kwargs:
+ inst['host'] = kwargs.get('host')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['root_gb'] = kwargs.get('root_gb', 30)
+ inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
+ inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
+ inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
+ inst['task_state'] = kwargs.get('task_state', None)
+ inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['ami_launch_index'] = 0
+ inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ return inst
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context_maker.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithNovaZone(object):
+ environ = {"nova.context": context_maker.get_admin_context()}
+ GET = {"zone": "nova"}
+
+
+class FakeRequestWithNovaService(object):
+ environ = {"nova.context": context_maker.get_admin_context()}
+ GET = {"service": "compute"}
+
+
+class FakeRequestWithInvalidNovaService(object):
+ environ = {"nova.context": context_maker.get_admin_context()}
+ GET = {"service": "invalid"}
+
+
+class HostTestCaseV21(test.TestCase):
+ """Test Case for hosts."""
+ validation_ex = exception.ValidationError
+ Controller = os_hosts_v3.HostController
+ policy_ex = exception.PolicyNotAuthorized
+
+ def _setup_stubs(self):
+ # Pretend we have fake_hosts.HOST_LIST in the DB
+ self.stubs.Set(db, 'service_get_all',
+ stub_service_get_all)
+ # Only hosts in our fake DB exist
+ self.stubs.Set(db, 'service_get_by_host_and_topic',
+ stub_service_get_by_host_and_topic)
+        # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_enabled',
+ stub_set_host_enabled)
+        # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_maintenance',
+ stub_set_host_maintenance)
+ self.stubs.Set(self.hosts_api, 'host_power_action',
+ stub_host_power_action)
+
+ def setUp(self):
+ super(HostTestCaseV21, self).setUp()
+ self.controller = self.Controller()
+ self.hosts_api = self.controller.api
+ self.req = FakeRequest()
+
+ self._setup_stubs()
+
+ def _test_host_update(self, host, key, val, expected_value):
+ body = {key: val}
+ result = self.controller.update(self.req, host, body=body)
+ self.assertEqual(result[key], expected_value)
+
+ def test_list_hosts(self):
+ """Verify that the compute hosts are returned."""
+ result = self.controller.index(self.req)
+ self.assertIn('hosts', result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST, hosts)
+
+ def test_disable_host(self):
+ self._test_host_update('host_c1', 'status', 'disable', 'disabled')
+ self._test_host_update('host_c2', 'status', 'disable', 'enabled')
+
+ def test_enable_host(self):
+ self._test_host_update('host_c1', 'status', 'enable', 'enabled')
+ self._test_host_update('host_c2', 'status', 'enable', 'disabled')
+
+ def test_enable_maintenance(self):
+ self._test_host_update('host_c1', 'maintenance_mode',
+ 'enable', 'on_maintenance')
+
+ def test_disable_maintenance(self):
+ self._test_host_update('host_c1', 'maintenance_mode',
+ 'disable', 'off_maintenance')
+
+ def _test_host_update_notimpl(self, key, val):
+ def stub_service_get_all_notimpl(self, req):
+ return [{'host': 'notimplemented', 'topic': None,
+ 'availability_zone': None}]
+ self.stubs.Set(db, 'service_get_all',
+ stub_service_get_all_notimpl)
+ body = {key: val}
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ self.controller.update,
+ self.req, 'notimplemented', body=body)
+
+ def test_disable_host_notimpl(self):
+ self._test_host_update_notimpl('status', 'disable')
+
+ def test_enable_maintenance_notimpl(self):
+ self._test_host_update_notimpl('maintenance_mode', 'enable')
+
+ def test_host_startup(self):
+ result = self.controller.startup(self.req, "host_c1")
+ self.assertEqual(result["power_action"], "startup")
+
+ def test_host_shutdown(self):
+ result = self.controller.shutdown(self.req, "host_c1")
+ self.assertEqual(result["power_action"], "shutdown")
+
+ def test_host_reboot(self):
+ result = self.controller.reboot(self.req, "host_c1")
+ self.assertEqual(result["power_action"], "reboot")
+
+ def _test_host_power_action_notimpl(self, method):
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ method, self.req, "notimplemented")
+
+ def test_host_startup_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.startup)
+
+ def test_host_shutdown_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.shutdown)
+
+ def test_host_reboot_notimpl(self):
+ self._test_host_power_action_notimpl(self.controller.reboot)
+
+ def test_host_status_bad_host(self):
+ # A host given as an argument does not exist.
+ self.req.environ["nova.context"].is_admin = True
+ dest = 'dummydest'
+ with testtools.ExpectedException(webob.exc.HTTPNotFound,
+ ".*%s.*" % dest):
+ self.controller.update(self.req, dest, body={'status': 'enable'})
+
+ def test_host_maintenance_bad_host(self):
+ # A host given as an argument does not exist.
+ self.req.environ["nova.context"].is_admin = True
+ dest = 'dummydest'
+ with testtools.ExpectedException(webob.exc.HTTPNotFound,
+ ".*%s.*" % dest):
+ self.controller.update(self.req, dest,
+ body={'maintenance_mode': 'enable'})
+
+ def test_host_power_action_bad_host(self):
+ # A host given as an argument does not exist.
+ self.req.environ["nova.context"].is_admin = True
+ dest = 'dummydest'
+ with testtools.ExpectedException(webob.exc.HTTPNotFound,
+ ".*%s.*" % dest):
+ self.controller.reboot(self.req, dest)
+
+ def test_bad_status_value(self):
+ bad_body = {"status": "bad"}
+ self.assertRaises(self.validation_ex, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+ bad_body2 = {"status": "disablabc"}
+ self.assertRaises(self.validation_ex, self.controller.update,
+ self.req, "host_c1", body=bad_body2)
+
+ def test_bad_update_key(self):
+ bad_body = {"crazy": "bad"}
+ self.assertRaises(self.validation_ex, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_update_key_and_correct_update_key(self):
+ bad_body = {"status": "disable", "crazy": "bad"}
+ self.assertRaises(self.validation_ex, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_good_update_keys(self):
+ body = {"status": "disable", "maintenance_mode": "enable"}
+ result = self.controller.update(self.req, 'host_c1', body=body)
+ self.assertEqual(result["host"], "host_c1")
+ self.assertEqual(result["status"], "disabled")
+ self.assertEqual(result["maintenance_mode"], "on_maintenance")
+
+ def test_show_forbidden(self):
+ self.req.environ["nova.context"].is_admin = False
+ dest = 'dummydest'
+ self.assertRaises(self.policy_ex,
+ self.controller.show,
+ self.req, dest)
+ self.req.environ["nova.context"].is_admin = True
+
+ def test_show_host_not_exist(self):
+ # A host given as an argument does not exist.
+ self.req.environ["nova.context"].is_admin = True
+ dest = 'dummydest'
+ with testtools.ExpectedException(webob.exc.HTTPNotFound,
+ ".*%s.*" % dest):
+ self.controller.show(self.req, dest)
+
+ def _create_compute_service(self):
+ """Create compute-manager(ComputeNode and Service record)."""
+ ctxt = self.req.environ["nova.context"]
+ dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0}
+ s_ref = db.service_create(ctxt, dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': '', 'stats': ''}
+ db.compute_node_create(ctxt, dic)
+
+ return db.service_get(ctxt, s_ref['id'])
+
+ def test_show_no_project(self):
+ """No instances are running on the given host."""
+ ctxt = context_maker.get_admin_context()
+ s_ref = self._create_compute_service()
+
+ result = self.controller.show(self.req, s_ref['host'])
+
+ proj = ['(total)', '(used_now)', '(used_max)']
+ column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
+ self.assertEqual(len(result['host']), 3)
+ for resource in result['host']:
+ self.assertIn(resource['resource']['project'], proj)
+ self.assertEqual(len(resource['resource']), 5)
+ self.assertEqual(set(column), set(resource['resource'].keys()))
+ db.service_destroy(ctxt, s_ref['id'])
+
+ def test_show_works_correctly(self):
+ """show() works correctly as expected."""
+ ctxt = context_maker.get_admin_context()
+ s_ref = self._create_compute_service()
+ i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = _create_instance(project_id='p-02', vcpus=3,
+ host=s_ref['host'])
+
+ result = self.controller.show(self.req, s_ref['host'])
+
+ proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
+ column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
+ self.assertEqual(len(result['host']), 5)
+ for resource in result['host']:
+ self.assertIn(resource['resource']['project'], proj)
+ self.assertEqual(len(resource['resource']), 5)
+ self.assertEqual(set(column), set(resource['resource'].keys()))
+ db.service_destroy(ctxt, s_ref['id'])
+ db.instance_destroy(ctxt, i_ref1['uuid'])
+ db.instance_destroy(ctxt, i_ref2['uuid'])
+
+ def test_list_hosts_with_zone(self):
+ result = self.controller.index(FakeRequestWithNovaZone())
+ self.assertIn('hosts', result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
+
+ def test_list_hosts_with_service(self):
+ result = self.controller.index(FakeRequestWithNovaService())
+ self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, result['hosts'])
+
+ def test_list_hosts_with_invalid_service(self):
+ result = self.controller.index(FakeRequestWithInvalidNovaService())
+ self.assertEqual([], result['hosts'])
+
+
+class HostTestCaseV20(HostTestCaseV21):
+ validation_ex = webob.exc.HTTPBadRequest
+ policy_ex = webob.exc.HTTPForbidden
+ Controller = os_hosts_v2.HostController
+
+    # Note: the v2 API does not support listing hosts filtered by service
+ def test_list_hosts_with_service(self):
+ pass
+
+ def test_list_hosts_with_invalid_service(self):
+ pass
+
+
+class HostSerializerTest(test.TestCase):
+ def setUp(self):
+ super(HostSerializerTest, self).setUp()
+ self.deserializer = os_hosts_v2.HostUpdateDeserializer()
+
+ def test_index_serializer(self):
+ serializer = os_hosts_v2.HostIndexTemplate()
+ text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hosts', tree.tag)
+ self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
+ for i in range(len(fake_hosts.HOST_LIST)):
+ self.assertEqual('host', tree[i].tag)
+ self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
+ tree[i].get('host_name'))
+ self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
+ tree[i].get('service'))
+ self.assertEqual(fake_hosts.HOST_LIST[i]['zone'],
+ tree[i].get('zone'))
+
+ def test_update_serializer_with_status(self):
+ exemplar = dict(host='host_c1', status='enabled')
+ serializer = os_hosts_v2.HostUpdateTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('host', tree.tag)
+ for key, value in exemplar.items():
+ self.assertEqual(value, tree.get(key))
+
+ def test_update_serializer_with_maintenance_mode(self):
+ exemplar = dict(host='host_c1', maintenance_mode='enabled')
+ serializer = os_hosts_v2.HostUpdateTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('host', tree.tag)
+ for key, value in exemplar.items():
+ self.assertEqual(value, tree.get(key))
+
+ def test_update_serializer_with_maintenance_mode_and_status(self):
+ exemplar = dict(host='host_c1',
+ maintenance_mode='enabled',
+ status='enabled')
+ serializer = os_hosts_v2.HostUpdateTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('host', tree.tag)
+ for key, value in exemplar.items():
+ self.assertEqual(value, tree.get(key))
+
+ def test_action_serializer(self):
+ exemplar = dict(host='host_c1', power_action='reboot')
+ serializer = os_hosts_v2.HostActionTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('host', tree.tag)
+ for key, value in exemplar.items():
+ self.assertEqual(value, tree.get(key))
+
+ def test_update_deserializer(self):
+ exemplar = dict(status='enabled', maintenance_mode='disable')
+ intext = """<?xml version='1.0' encoding='UTF-8'?>
+ <updates>
+ <status>enabled</status>
+ <maintenance_mode>disable</maintenance_mode>
+ </updates>"""
+ result = self.deserializer.deserialize(intext)
+
+ self.assertEqual(dict(body=exemplar), result)
+
+ def test_corrupt_xml(self):
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py
new file mode 100644
index 0000000000..2d9187a7d1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
+
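+# TEST_HYPER reuses the first fake hypervisor from test_hypervisors, but with
+# a fully populated service record so the status/state view logic has real
+# service fields (disabled, disabled_reason, ...) to inspect.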
+TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0],
+ service=dict(id=1,
+ host="compute1",
+ binary="nova-compute",
+ topic="compute_topic",
+ report_count=5,
+ disabled=False,
+ disabled_reason=None,
+ availability_zone="nova"),
+ )
+
+
+class HypervisorStatusTestV21(test.NoDBTestCase):
+ def _prepare_extension(self):
+ self.controller = hypervisors_v21.HypervisorsController()
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
+
+ def test_view_hypervisor_service_status(self):
+ self._prepare_extension()
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, False)
+ self.assertEqual('enabled', result['status'])
+ self.assertEqual('up', result['state'])
+ self.assertEqual('enabled', result['status'])
+
+ self.controller.servicegroup_api.service_is_up.return_value = False
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, False)
+ self.assertEqual('down', result['state'])
+
+ hyper = copy.deepcopy(TEST_HYPER)
+ hyper['service']['disabled'] = True
+ result = self.controller._view_hypervisor(hyper, False)
+ self.assertEqual('disabled', result['status'])
+
+ def test_view_hypervisor_detail_status(self):
+ self._prepare_extension()
+
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, True)
+
+ self.assertEqual('enabled', result['status'])
+ self.assertEqual('up', result['state'])
+ self.assertIsNone(result['service']['disabled_reason'])
+
+ self.controller.servicegroup_api.service_is_up.return_value = False
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, True)
+ self.assertEqual('down', result['state'])
+
+ hyper = copy.deepcopy(TEST_HYPER)
+ hyper['service']['disabled'] = True
+ hyper['service']['disabled_reason'] = "fake"
+ result = self.controller._view_hypervisor(hyper, True)
+        self.assertEqual('disabled', result['status'])
+ self.assertEqual('fake', result['service']['disabled_reason'])
+
+
+class HypervisorStatusTestV2(HypervisorStatusTestV21):
+ def _prepare_extension(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {}
+ ext_mgr.extensions['os-hypervisor-status'] = True
+ self.controller = hypervisors_v2.HypervisorsController(ext_mgr)
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py
new file mode 100644
index 0000000000..9ae3c307c5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py
@@ -0,0 +1,596 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+TEST_HYPERS = [
+ dict(id=1,
+ service_id=1,
+ service=dict(id=1,
+ host="compute1",
+ binary="nova-compute",
+ topic="compute_topic",
+ report_count=5,
+ disabled=False,
+ disabled_reason=None,
+ availability_zone="nova"),
+ vcpus=4,
+ memory_mb=10 * 1024,
+ local_gb=250,
+ vcpus_used=2,
+ memory_mb_used=5 * 1024,
+ local_gb_used=125,
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper1",
+ free_ram_mb=5 * 1024,
+ free_disk_gb=125,
+ current_workload=2,
+ running_vms=2,
+ cpu_info='cpu_info',
+ disk_available_least=100,
+ host_ip='1.1.1.1'),
+ dict(id=2,
+ service_id=2,
+ service=dict(id=2,
+ host="compute2",
+ binary="nova-compute",
+ topic="compute_topic",
+ report_count=5,
+ disabled=False,
+ disabled_reason=None,
+ availability_zone="nova"),
+ vcpus=4,
+ memory_mb=10 * 1024,
+ local_gb=250,
+ vcpus_used=2,
+ memory_mb_used=5 * 1024,
+ local_gb_used=125,
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper2",
+ free_ram_mb=5 * 1024,
+ free_disk_gb=125,
+ current_workload=2,
+ running_vms=2,
+ cpu_info='cpu_info',
+ disk_available_least=100,
+ host_ip='2.2.2.2')]
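+# Two fake instances per compute host, used when checking the per-hypervisor
+# server listings.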
+TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
+ dict(name="inst2", uuid="uuid2", host="compute2"),
+ dict(name="inst3", uuid="uuid3", host="compute1"),
+ dict(name="inst4", uuid="uuid4", host="compute2")]
+
+
+def fake_compute_node_get_all(context):
+ return TEST_HYPERS
+
+
+def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
+ return TEST_HYPERS
+
+
+def fake_compute_node_get(context, compute_id):
+ for hyper in TEST_HYPERS:
+ if hyper['id'] == compute_id:
+ return hyper
+ raise exception.ComputeHostNotFound(host=compute_id)
+
+
+def fake_compute_node_statistics(context):
+ result = dict(
+ count=0,
+ vcpus=0,
+ memory_mb=0,
+ local_gb=0,
+ vcpus_used=0,
+ memory_mb_used=0,
+ local_gb_used=0,
+ free_ram_mb=0,
+ free_disk_gb=0,
+ current_workload=0,
+ running_vms=0,
+ disk_available_least=0,
+ )
+
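+    # Sum every statistic across TEST_HYPERS; 'count' simply tracks the
+    # number of hypervisors seen.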
+ for hyper in TEST_HYPERS:
+ for key in result:
+ if key == 'count':
+ result[key] += 1
+ else:
+ result[key] += hyper[key]
+
+ return result
+
+
+def fake_instance_get_all_by_host(context, host):
+ results = []
+ for inst in TEST_SERVERS:
+ if inst['host'] == host:
+ results.append(inst)
+ return results
+
+
+class HypervisorsTestV21(test.NoDBTestCase):
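+    # Expected API views: the detail view drops service_id and adds
+    # state/status plus a trimmed service dict, while the index view only
+    # carries id, hostname, state and status.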
+ DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
+ del DETAIL_HYPERS_DICTS[0]['service_id']
+ del DETAIL_HYPERS_DICTS[1]['service_id']
+ DETAIL_HYPERS_DICTS[0].update({'state': 'up',
+ 'status': 'enabled',
+ 'service': dict(id=1, host='compute1',
+ disabled_reason=None)})
+ DETAIL_HYPERS_DICTS[1].update({'state': 'up',
+ 'status': 'enabled',
+ 'service': dict(id=2, host='compute2',
+ disabled_reason=None)})
+
+ INDEX_HYPER_DICTS = [
+ dict(id=1, hypervisor_hostname="hyper1",
+ state='up', status='enabled'),
+ dict(id=2, hypervisor_hostname="hyper2",
+ state='up', status='enabled')]
+
+ NO_SERVER_HYPER_DICTS = copy.deepcopy(INDEX_HYPER_DICTS)
+ NO_SERVER_HYPER_DICTS[0].update({'servers': []})
+ NO_SERVER_HYPER_DICTS[1].update({'servers': []})
+
+ def _get_request(self, use_admin_context):
+ return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics',
+ use_admin_context=use_admin_context)
+
+ def _set_up_controller(self):
+ self.controller = hypervisors_v21.HypervisorsController()
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
+
+ def setUp(self):
+ super(HypervisorsTestV21, self).setUp()
+ self._set_up_controller()
+
+ self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
+ self.stubs.Set(db, 'compute_node_search_by_hypervisor',
+ fake_compute_node_search_by_hypervisor)
+ self.stubs.Set(db, 'compute_node_get',
+ fake_compute_node_get)
+ self.stubs.Set(db, 'compute_node_statistics',
+ fake_compute_node_statistics)
+ self.stubs.Set(db, 'instance_get_all_by_host',
+ fake_instance_get_all_by_host)
+
+ def test_view_hypervisor_nodetail_noservers(self):
+ result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
+
+ self.assertEqual(result, self.INDEX_HYPER_DICTS[0])
+
+ def test_view_hypervisor_detail_noservers(self):
+ result = self.controller._view_hypervisor(TEST_HYPERS[0], True)
+
+ self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
+
+ def test_view_hypervisor_servers(self):
+ result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
+ TEST_SERVERS)
+ expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
+ expected_dict.update({'servers': [
+ dict(name="inst1", uuid="uuid1"),
+ dict(name="inst2", uuid="uuid2"),
+ dict(name="inst3", uuid="uuid3"),
+ dict(name="inst4", uuid="uuid4")]})
+
+ self.assertEqual(result, expected_dict)
+
+ def test_index(self):
+ req = self._get_request(True)
+ result = self.controller.index(req)
+
+ self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
+
+ def test_index_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
+ def test_detail(self):
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+
+ self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
+
+ def test_detail_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.detail, req)
+
+ def test_show_noid(self):
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
+
+ def test_show_non_integer_id(self):
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
+
+ def test_show_withid(self):
+ req = self._get_request(True)
+ result = self.controller.show(req, '1')
+
+ self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
+
+ def test_show_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, '1')
+
+ def test_uptime_noid(self):
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')
+
+ def test_uptime_notimplemented(self):
+ def fake_get_host_uptime(context, hyp):
+ raise exc.HTTPNotImplemented()
+
+ self.stubs.Set(self.controller.host_api, 'get_host_uptime',
+ fake_get_host_uptime)
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.uptime, req, '1')
+
+ def test_uptime_implemented(self):
+ def fake_get_host_uptime(context, hyp):
+ return "fake uptime"
+
+ self.stubs.Set(self.controller.host_api, 'get_host_uptime',
+ fake_get_host_uptime)
+
+ req = self._get_request(True)
+ result = self.controller.uptime(req, '1')
+
+ expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
+ expected_dict.update({'uptime': "fake uptime"})
+ self.assertEqual(result, dict(hypervisor=expected_dict))
+
+ def test_uptime_non_integer_id(self):
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
+
+ def test_uptime_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.uptime, req, '1')
+
+ def test_search(self):
+ req = self._get_request(True)
+ result = self.controller.search(req, 'hyper')
+
+ self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
+
+ def test_search_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.search, req, '1')
+
+ def test_search_non_exist(self):
+ def fake_compute_node_search_by_hypervisor_return_empty(context,
+ hypervisor_re):
+ return []
+ self.stubs.Set(db, 'compute_node_search_by_hypervisor',
+ fake_compute_node_search_by_hypervisor_return_empty)
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+
+ def test_servers(self):
+ req = self._get_request(True)
+ result = self.controller.servers(req, 'hyper')
+
+ expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
+ expected_dict[0].update({'servers': [
+ dict(name="inst1", uuid="uuid1"),
+ dict(name="inst3", uuid="uuid3")]})
+ expected_dict[1].update({'servers': [
+ dict(name="inst2", uuid="uuid2"),
+ dict(name="inst4", uuid="uuid4")]})
+
+ self.assertEqual(result, dict(hypervisors=expected_dict))
+
+ def test_servers_non_id(self):
+ def fake_compute_node_search_by_hypervisor_return_empty(context,
+ hypervisor_re):
+ return []
+ self.stubs.Set(db, 'compute_node_search_by_hypervisor',
+ fake_compute_node_search_by_hypervisor_return_empty)
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers,
+ req, '115')
+
+ def test_servers_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.servers, req, '1')
+
+ def test_servers_with_non_integer_hypervisor_id(self):
+ def fake_compute_node_search_by_hypervisor_return_empty(context,
+ hypervisor_re):
+ return []
+ self.stubs.Set(db, 'compute_node_search_by_hypervisor',
+ fake_compute_node_search_by_hypervisor_return_empty)
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers, req, 'abc')
+
+ def test_servers_with_no_server(self):
+ def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
+ return []
+ self.stubs.Set(db, 'instance_get_all_by_host',
+ fake_instance_get_all_by_host_return_empty)
+ req = self._get_request(True)
+ result = self.controller.servers(req, '1')
+ self.assertEqual(result, dict(hypervisors=self.NO_SERVER_HYPER_DICTS))
+
+ def test_statistics(self):
+ req = self._get_request(True)
+ result = self.controller.statistics(req)
+
+ self.assertEqual(result, dict(hypervisor_statistics=dict(
+ count=2,
+ vcpus=8,
+ memory_mb=20 * 1024,
+ local_gb=500,
+ vcpus_used=4,
+ memory_mb_used=10 * 1024,
+ local_gb_used=250,
+ free_ram_mb=10 * 1024,
+ free_disk_gb=250,
+ current_workload=4,
+ running_vms=4,
+ disk_available_least=200)))
+
+ def test_statistics_non_admin(self):
+ req = self._get_request(False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.statistics, req)
+
+
+class HypervisorsTestV2(HypervisorsTestV21):
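+    # The v2 API without extensions omits state/status, host_ip,
+    # disabled_reason and the empty servers list, so strip those keys from
+    # the expected views inherited from the v2.1 tests.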
+ DETAIL_HYPERS_DICTS = copy.deepcopy(
+ HypervisorsTestV21.DETAIL_HYPERS_DICTS)
+ del DETAIL_HYPERS_DICTS[0]['state']
+ del DETAIL_HYPERS_DICTS[1]['state']
+ del DETAIL_HYPERS_DICTS[0]['status']
+ del DETAIL_HYPERS_DICTS[1]['status']
+ del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
+ del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
+ del DETAIL_HYPERS_DICTS[0]['host_ip']
+ del DETAIL_HYPERS_DICTS[1]['host_ip']
+
+ INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
+ del INDEX_HYPER_DICTS[0]['state']
+ del INDEX_HYPER_DICTS[1]['state']
+ del INDEX_HYPER_DICTS[0]['status']
+ del INDEX_HYPER_DICTS[1]['status']
+
+ NO_SERVER_HYPER_DICTS = copy.deepcopy(
+ HypervisorsTestV21.NO_SERVER_HYPER_DICTS)
+ del NO_SERVER_HYPER_DICTS[0]['state']
+ del NO_SERVER_HYPER_DICTS[1]['state']
+ del NO_SERVER_HYPER_DICTS[0]['status']
+ del NO_SERVER_HYPER_DICTS[1]['status']
+ del NO_SERVER_HYPER_DICTS[0]['servers']
+ del NO_SERVER_HYPER_DICTS[1]['servers']
+
+ def _set_up_controller(self):
+ self.context = context.get_admin_context()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
+
+
+class HypervisorsSerializersTest(test.NoDBTestCase):
+ def compare_to_exemplar(self, exemplar, hyper):
+ # Check attributes
+ for key, value in exemplar.items():
+ if key in ('service', 'servers'):
+ # These turn into child elements and get tested
+ # separately below...
+ continue
+
+ self.assertEqual(str(value), hyper.get(key))
+
+ # Check child elements
+ required_children = set([child for child in ('service', 'servers')
+ if child in exemplar])
+ for child in hyper:
+ self.assertIn(child.tag, required_children)
+ required_children.remove(child.tag)
+
+ # Check the node...
+ if child.tag == 'service':
+ for key, value in exemplar['service'].items():
+ self.assertEqual(str(value), child.get(key))
+ elif child.tag == 'servers':
+ for idx, grandchild in enumerate(child):
+ self.assertEqual('server', grandchild.tag)
+ for key, value in exemplar['servers'][idx].items():
+ self.assertEqual(str(value), grandchild.get(key))
+
+ # Are they all accounted for?
+ self.assertEqual(len(required_children), 0)
+
+ def test_index_serializer(self):
+ serializer = hypervisors_v2.HypervisorIndexTemplate()
+ exemplar = dict(hypervisors=[
+ dict(hypervisor_hostname="hyper1",
+ id=1),
+ dict(hypervisor_hostname="hyper2",
+ id=2)])
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisors', tree.tag)
+ self.assertEqual(len(exemplar['hypervisors']), len(tree))
+ for idx, hyper in enumerate(tree):
+ self.assertEqual('hypervisor', hyper.tag)
+ self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
+
+ def test_detail_serializer(self):
+ serializer = hypervisors_v2.HypervisorDetailTemplate()
+ exemplar = dict(hypervisors=[
+ dict(hypervisor_hostname="hyper1",
+ id=1,
+ vcpus=4,
+ memory_mb=10 * 1024,
+ local_gb=500,
+ vcpus_used=2,
+ memory_mb_used=5 * 1024,
+ local_gb_used=250,
+ hypervisor_type='xen',
+ hypervisor_version=3,
+ free_ram_mb=5 * 1024,
+ free_disk_gb=250,
+ current_workload=2,
+ running_vms=2,
+ cpu_info="json data",
+ disk_available_least=100,
+ host_ip='1.1.1.1',
+ service=dict(id=1, host="compute1")),
+ dict(hypervisor_hostname="hyper2",
+ id=2,
+ vcpus=4,
+ memory_mb=10 * 1024,
+ local_gb=500,
+ vcpus_used=2,
+ memory_mb_used=5 * 1024,
+ local_gb_used=250,
+ hypervisor_type='xen',
+ hypervisor_version=3,
+ free_ram_mb=5 * 1024,
+ free_disk_gb=250,
+ current_workload=2,
+ running_vms=2,
+ cpu_info="json data",
+ disk_available_least=100,
+ host_ip='2.2.2.2',
+ service=dict(id=2, host="compute2"))])
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisors', tree.tag)
+ self.assertEqual(len(exemplar['hypervisors']), len(tree))
+ for idx, hyper in enumerate(tree):
+ self.assertEqual('hypervisor', hyper.tag)
+ self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
+
+ def test_show_serializer(self):
+ serializer = hypervisors_v2.HypervisorTemplate()
+ exemplar = dict(hypervisor=dict(
+ hypervisor_hostname="hyper1",
+ id=1,
+ vcpus=4,
+ memory_mb=10 * 1024,
+ local_gb=500,
+ vcpus_used=2,
+ memory_mb_used=5 * 1024,
+ local_gb_used=250,
+ hypervisor_type='xen',
+ hypervisor_version=3,
+ free_ram_mb=5 * 1024,
+ free_disk_gb=250,
+ current_workload=2,
+ running_vms=2,
+ cpu_info="json data",
+ disk_available_least=100,
+ host_ip='1.1.1.1',
+ service=dict(id=1, host="compute1")))
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisor', tree.tag)
+ self.compare_to_exemplar(exemplar['hypervisor'], tree)
+
+ def test_uptime_serializer(self):
+ serializer = hypervisors_v2.HypervisorUptimeTemplate()
+ exemplar = dict(hypervisor=dict(
+ hypervisor_hostname="hyper1",
+ id=1,
+ uptime='fake uptime'))
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisor', tree.tag)
+ self.compare_to_exemplar(exemplar['hypervisor'], tree)
+
+ def test_servers_serializer(self):
+ serializer = hypervisors_v2.HypervisorServersTemplate()
+ exemplar = dict(hypervisors=[
+ dict(hypervisor_hostname="hyper1",
+ id=1,
+ servers=[
+ dict(name="inst1",
+ uuid="uuid1"),
+ dict(name="inst2",
+ uuid="uuid2")]),
+ dict(hypervisor_hostname="hyper2",
+ id=2,
+ servers=[
+ dict(name="inst3",
+ uuid="uuid3"),
+ dict(name="inst4",
+ uuid="uuid4")])])
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisors', tree.tag)
+ self.assertEqual(len(exemplar['hypervisors']), len(tree))
+ for idx, hyper in enumerate(tree):
+ self.assertEqual('hypervisor', hyper.tag)
+ self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
+
+ def test_statistics_serializer(self):
+ serializer = hypervisors_v2.HypervisorStatisticsTemplate()
+ exemplar = dict(hypervisor_statistics=dict(
+ count=2,
+ vcpus=8,
+ memory_mb=20 * 1024,
+ local_gb=500,
+ vcpus_used=4,
+ memory_mb_used=10 * 1024,
+ local_gb_used=250,
+ free_ram_mb=10 * 1024,
+ free_disk_gb=250,
+ current_workload=4,
+ running_vms=4,
+ disk_available_least=200))
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+
+ self.assertEqual('hypervisor_statistics', tree.tag)
+ self.compare_to_exemplar(exemplar['hypervisor_statistics'], tree)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py b/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py
new file mode 100644
index 0000000000..2a8d95cb86
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py
@@ -0,0 +1,138 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import image_size
+from nova.image import glance
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+IMAGES = [{
+ 'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 10,
+ 'minRam': 128,
+ 'size': 12345678,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/123",
+ }],
+ },
+ {
+ 'id': '124',
+ 'name': 'queued snapshot',
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 25,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'size': 87654321,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/124",
+ }],
+ }]
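+# The image_size extension exposes the raw 'size' values above as
+# '<prefix>:size' attributes (e.g. 'OS-EXT-IMG-SIZE:size' in JSON) in the
+# responses checked below.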
+
+
+def fake_show(*args, **kwargs):
+ return IMAGES[0]
+
+
+def fake_detail(*args, **kwargs):
+ return IMAGES
+
+
+class ImageSizeTestV21(test.NoDBTestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-IMG-SIZE'
+
+ def setUp(self):
+ super(ImageSizeTestV21, self).setUp()
+ self.stubs.Set(glance.GlanceImageService, 'show', fake_show)
+ self.stubs.Set(glance.GlanceImageService, 'detail', fake_detail)
+ self.flags(osapi_compute_extension=['nova.api.openstack.compute'
+ '.contrib.image_size.Image_size'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(self._get_app())
+ return res
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21()
+
+ def _get_image(self, body):
+ return jsonutils.loads(body).get('image')
+
+ def _get_images(self, body):
+ return jsonutils.loads(body).get('images')
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(image.get('%s:size' % self.prefix), size)
+
+ def test_show(self):
+ url = '/v2/fake/images/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ image = self._get_image(res.body)
+ self.assertImageSize(image, 12345678)
+
+ def test_detail(self):
+ url = '/v2/fake/images/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ images = self._get_images(res.body)
+ self.assertImageSize(images[0], 12345678)
+ self.assertImageSize(images[1], 87654321)
+
+
+class ImageSizeTestV2(ImageSizeTestV21):
+ def _get_app(self):
+ return fakes.wsgi_app()
+
+
+class ImageSizeXmlTest(ImageSizeTestV2):
+ content_type = 'application/xml'
+ prefix = '{%s}' % image_size.Image_size.namespace
+
+ def _get_image(self, body):
+ return etree.XML(body)
+
+ def _get_images(self, body):
+ return etree.XML(body).getchildren()
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(int(image.get('%ssize' % self.prefix)), size)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py
new file mode 100644
index 0000000000..a5ea3784e3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py
@@ -0,0 +1,327 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import instance_actions \
+ as instance_actions_v2
+from nova.api.openstack.compute.plugins.v3 import instance_actions \
+ as instance_actions_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+
+FAKE_UUID = fake_server_actions.FAKE_UUID
+FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
+
+
+def format_action(action):
+ '''Remove keys that aren't serialized.'''
+ to_delete = ('id', 'finish_time', 'created_at', 'updated_at', 'deleted_at',
+ 'deleted')
+ for key in to_delete:
+ if key in action:
+ del(action[key])
+ if 'start_time' in action:
+ # NOTE(danms): Without WSGI above us, these will be just stringified
+ action['start_time'] = str(action['start_time'].replace(tzinfo=None))
+ for event in action.get('events', []):
+ format_event(event)
+ return action
+
+
+def format_event(event):
+ '''Remove keys that aren't serialized.'''
+ to_delete = ('id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
+ 'action_id')
+ for key in to_delete:
+ if key in event:
+ del(event[key])
+ if 'start_time' in event:
+ # NOTE(danms): Without WSGI above us, these will be just stringified
+ event['start_time'] = str(event['start_time'].replace(tzinfo=None))
+ if 'finish_time' in event:
+ # NOTE(danms): Without WSGI above us, these will be just stringified
+ event['finish_time'] = str(event['finish_time'].replace(tzinfo=None))
+ return event
+
+
+class InstanceActionsPolicyTestV21(test.NoDBTestCase):
+ instance_actions = instance_actions_v21
+
+ def setUp(self):
+ super(InstanceActionsPolicyTestV21, self).setUp()
+ self.controller = self.instance_actions.InstanceActionsController()
+
+ def _get_http_req(self, action):
+ fake_url = '/123/servers/12/%s' % action
+ return fakes.HTTPRequest.blank(fake_url)
+
+ def _set_policy_rules(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:v3:os-instance-actions':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ def test_list_actions_restricted_by_project(self):
+ self._set_policy_rules()
+
+ def fake_instance_get_by_uuid(context, instance_id,
+ columns_to_join=None,
+ use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'name': 'fake', 'project_id': '%s_unequal' %
+ context.project_id})
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = self._get_http_req('os-instance-actions')
+ self.assertRaises(exception.Forbidden, self.controller.index, req,
+ str(uuid.uuid4()))
+
+ def test_get_action_restricted_by_project(self):
+ self._set_policy_rules()
+
+ def fake_instance_get_by_uuid(context, instance_id,
+ columns_to_join=None,
+ use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'name': 'fake', 'project_id': '%s_unequal' %
+ context.project_id})
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = self._get_http_req('os-instance-actions/1')
+ self.assertRaises(exception.Forbidden, self.controller.show, req,
+ str(uuid.uuid4()), '1')
+
+
+class InstanceActionsPolicyTestV2(InstanceActionsPolicyTestV21):
+ instance_actions = instance_actions_v2
+
+ def _set_policy_rules(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:instance_actions':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+
+class InstanceActionsTestV21(test.NoDBTestCase):
+ instance_actions = instance_actions_v21
+
+ def setUp(self):
+ super(InstanceActionsTestV21, self).setUp()
+ self.controller = self.instance_actions.InstanceActionsController()
+ self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
+ self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
+
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=False):
+ return {'uuid': instance_uuid}
+
+ def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
+ return {'name': 'fake', 'project_id': context.project_id}
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+
+ def _get_http_req(self, action, use_admin_context=False):
+ fake_url = '/123/servers/12/%s' % action
+ return fakes.HTTPRequest.blank(fake_url,
+ use_admin_context=use_admin_context)
+
+ def _set_policy_rules(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:v3:os-instance-actions':
+ common_policy.parse_rule(''),
+ 'compute_extension:v3:os-instance-actions:events':
+ common_policy.parse_rule('is_admin:True')}
+ policy.set_rules(rules)
+
+ def test_list_actions(self):
+ def fake_get_actions(context, uuid):
+ actions = []
+ for act in self.fake_actions[uuid].itervalues():
+ action = models.InstanceAction()
+ action.update(act)
+ actions.append(action)
+ return actions
+
+ self.stubs.Set(db, 'actions_get', fake_get_actions)
+ req = self._get_http_req('os-instance-actions')
+ res_dict = self.controller.index(req, FAKE_UUID)
+ for res in res_dict['instanceActions']:
+ fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
+ self.assertEqual(format_action(fake_action), format_action(res))
+
+ def test_get_action_with_events_allowed(self):
+ def fake_get_action(context, uuid, request_id):
+ action = models.InstanceAction()
+ action.update(self.fake_actions[uuid][request_id])
+ return action
+
+ def fake_get_events(context, action_id):
+ events = []
+ for evt in self.fake_events[action_id]:
+ event = models.InstanceActionEvent()
+ event.update(evt)
+ events.append(event)
+ return events
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
+ self.stubs.Set(db, 'action_events_get', fake_get_events)
+ req = self._get_http_req('os-instance-actions/1',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+ fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ fake_events = self.fake_events[fake_action['id']]
+ fake_action['events'] = fake_events
+ self.assertEqual(format_action(fake_action),
+ format_action(res_dict['instanceAction']))
+
+ def test_get_action_with_events_not_allowed(self):
+ def fake_get_action(context, uuid, request_id):
+ return self.fake_actions[uuid][request_id]
+
+ def fake_get_events(context, action_id):
+ return self.fake_events[action_id]
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
+ self.stubs.Set(db, 'action_events_get', fake_get_events)
+
+ self._set_policy_rules()
+ req = self._get_http_req('os-instance-actions/1')
+ res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+ fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ self.assertEqual(format_action(fake_action),
+ format_action(res_dict['instanceAction']))
+
+ def test_action_not_found(self):
+ def fake_no_action(context, uuid, action_id):
+ return None
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
+ req = self._get_http_req('os-instance-actions/1')
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
+ FAKE_UUID, FAKE_REQUEST_ID)
+
+ def test_index_instance_not_found(self):
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ req = self._get_http_req('os-instance-actions')
+ self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
+ FAKE_UUID)
+
+ def test_show_instance_not_found(self):
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ req = self._get_http_req('os-instance-actions/fake')
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
+ FAKE_UUID, 'fake')
+
+
+class InstanceActionsTestV2(InstanceActionsTestV21):
+ instance_actions = instance_actions_v2
+
+ def _set_policy_rules(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:instance_actions':
+ common_policy.parse_rule(''),
+ 'compute_extension:instance_actions:events':
+ common_policy.parse_rule('is_admin:True')}
+ policy.set_rules(rules)
+
+
+class InstanceActionsSerializerTestV2(test.NoDBTestCase):
+ def setUp(self):
+ super(InstanceActionsSerializerTestV2, self).setUp()
+ self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
+ self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
+
+ def _verify_instance_action_attachment(self, attach, tree):
+ for key in attach.keys():
+ if key != 'events':
+ self.assertEqual(attach[key], tree.get(key),
+ '%s did not match' % key)
+
+ def _verify_instance_action_event_attachment(self, attach, tree):
+ for key in attach.keys():
+ self.assertEqual(attach[key], tree.get(key),
+ '%s did not match' % key)
+
+ def test_instance_action_serializer(self):
+ serializer = instance_actions_v2.InstanceActionTemplate()
+ action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ text = serializer.serialize({'instanceAction': action})
+ tree = etree.fromstring(text)
+
+ action = format_action(action)
+ self.assertEqual('instanceAction', tree.tag)
+ self._verify_instance_action_attachment(action, tree)
+ found_events = False
+ for child in tree:
+ if child.tag == 'events':
+ found_events = True
+ self.assertFalse(found_events)
+
+ def test_instance_action_events_serializer(self):
+ serializer = instance_actions_v2.InstanceActionTemplate()
+ action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ event = self.fake_events[action['id']][0]
+ action['events'] = [dict(event), dict(event)]
+ text = serializer.serialize({'instanceAction': action})
+ tree = etree.fromstring(text)
+
+ action = format_action(action)
+ self.assertEqual('instanceAction', tree.tag)
+ self._verify_instance_action_attachment(action, tree)
+
+ event = format_event(event)
+ found_events = False
+ for child in tree:
+ if child.tag == 'events':
+ found_events = True
+ for key in event:
+ self.assertEqual(event[key], child.get(key))
+ self.assertTrue(found_events)
+
+ def test_instance_actions_serializer(self):
+ serializer = instance_actions_v2.InstanceActionsTemplate()
+ action_list = self.fake_actions[FAKE_UUID].values()
+ text = serializer.serialize({'instanceActions': action_list})
+ tree = etree.fromstring(text)
+
+ action_list = [format_action(action) for action in action_list]
+ self.assertEqual('instanceActions', tree.tag)
+ self.assertEqual(len(action_list), len(tree))
+ for idx, child in enumerate(tree):
+ self.assertEqual('instanceAction', child.tag)
+ request_id = child.get('request_id')
+ self._verify_instance_action_attachment(
+ self.fake_actions[FAKE_UUID][request_id],
+ child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py
new file mode 100644
index 0000000000..1ae85c8625
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.utils import timeutils
+
+from nova.api.openstack.compute.contrib import instance_usage_audit_log as ial
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+from nova import utils
+
+
+service_base = test_service.fake_service
+TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
+ dict(service_base, host='bar', topic='compute'),
+ dict(service_base, host='baz', topic='compute'),
+ dict(service_base, host='plonk', topic='compute'),
+ dict(service_base, host='wibble', topic='bogus'),
+ ]
+
+
+begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
+begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
+begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
+end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
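+# Three consecutive one-day audit periods; each period's end is also the next
+# period's beginning.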
+
+
+# test data
+
+
+TEST_LOGS1 = [
+ # all services done, no errors.
+ dict(host="plonk", period_beginning=begin1, period_ending=end1,
+ state="DONE", errors=0, task_items=23, message="test1"),
+ dict(host="baz", period_beginning=begin1, period_ending=end1,
+ state="DONE", errors=0, task_items=17, message="test2"),
+ dict(host="bar", period_beginning=begin1, period_ending=end1,
+ state="DONE", errors=0, task_items=10, message="test3"),
+ dict(host="foo", period_beginning=begin1, period_ending=end1,
+ state="DONE", errors=0, task_items=7, message="test4"),
+ ]
+
+
+TEST_LOGS2 = [
+ # some still running...
+ dict(host="plonk", period_beginning=begin2, period_ending=end2,
+ state="DONE", errors=0, task_items=23, message="test5"),
+ dict(host="baz", period_beginning=begin2, period_ending=end2,
+ state="DONE", errors=0, task_items=17, message="test6"),
+ dict(host="bar", period_beginning=begin2, period_ending=end2,
+ state="RUNNING", errors=0, task_items=10, message="test7"),
+ dict(host="foo", period_beginning=begin2, period_ending=end2,
+ state="DONE", errors=0, task_items=7, message="test8"),
+ ]
+
+
+TEST_LOGS3 = [
+    # some errors...
+ dict(host="plonk", period_beginning=begin3, period_ending=end3,
+ state="DONE", errors=0, task_items=23, message="test9"),
+ dict(host="baz", period_beginning=begin3, period_ending=end3,
+ state="DONE", errors=2, task_items=17, message="test10"),
+ dict(host="bar", period_beginning=begin3, period_ending=end3,
+ state="DONE", errors=0, task_items=10, message="test11"),
+ dict(host="foo", period_beginning=begin3, period_ending=end3,
+ state="DONE", errors=1, task_items=7, message="test12"),
+ ]
+
+
+def fake_task_log_get_all(context, task_name, begin, end,
+ host=None, state=None):
+ assert task_name == "instance_usage_audit"
+
+ if begin == begin1 and end == end1:
+ return TEST_LOGS1
+ if begin == begin2 and end == end2:
+ return TEST_LOGS2
+ if begin == begin3 and end == end3:
+ return TEST_LOGS3
+ raise AssertionError("Invalid date %s to %s" % (begin, end))
+
+
+def fake_last_completed_audit_period(unit=None, before=None):
+ audit_periods = [(begin3, end3),
+ (begin2, end2),
+ (begin1, end1)]
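+    # Periods are listed newest first; return the most recent period that
+    # ended before 'before'. Without 'before', fall back to the oldest
+    # period.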
+ if before is not None:
+ for begin, end in audit_periods:
+ if before > end:
+ return begin, end
+ raise AssertionError("Invalid before date %s" % (before))
+ return begin1, end1
+
+
+class InstanceUsageAuditLogTest(test.NoDBTestCase):
+ def setUp(self):
+ super(InstanceUsageAuditLogTest, self).setUp()
+ self.context = context.get_admin_context()
+ timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
+ self.controller = ial.InstanceUsageAuditLogController()
+ self.host_api = self.controller.host_api
+
+ def fake_service_get_all(context, disabled):
+ self.assertIsNone(disabled)
+ return TEST_COMPUTE_SERVICES
+
+ self.stubs.Set(utils, 'last_completed_audit_period',
+ fake_last_completed_audit_period)
+ self.stubs.Set(db, 'service_get_all',
+ fake_service_get_all)
+ self.stubs.Set(db, 'task_log_get_all',
+ fake_task_log_get_all)
+
+ def tearDown(self):
+ super(InstanceUsageAuditLogTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_index(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
+ use_admin_context=True)
+ result = self.controller.index(req)
+ self.assertIn('instance_usage_audit_logs', result)
+ logs = result['instance_usage_audit_logs']
+ self.assertEqual(57, logs['total_instances'])
+ self.assertEqual(0, logs['total_errors'])
+ self.assertEqual(4, len(logs['log']))
+ self.assertEqual(4, logs['num_hosts'])
+ self.assertEqual(4, logs['num_hosts_done'])
+ self.assertEqual(0, logs['num_hosts_running'])
+ self.assertEqual(0, logs['num_hosts_not_run'])
+ self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
+
+ def test_index_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
+ def test_show(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-instance_usage_audit_log/show',
+ use_admin_context=True)
+ result = self.controller.show(req, '2012-07-05 10:00:00')
+ self.assertIn('instance_usage_audit_log', result)
+ logs = result['instance_usage_audit_log']
+ self.assertEqual(57, logs['total_instances'])
+ self.assertEqual(0, logs['total_errors'])
+ self.assertEqual(4, len(logs['log']))
+ self.assertEqual(4, logs['num_hosts'])
+ self.assertEqual(4, logs['num_hosts_done'])
+ self.assertEqual(0, logs['num_hosts_running'])
+ self.assertEqual(0, logs['num_hosts_not_run'])
+ self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
+
+ def test_show_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, '2012-07-05 10:00:00')
+
+ def test_show_with_running(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-instance_usage_audit_log/show',
+ use_admin_context=True)
+ result = self.controller.show(req, '2012-07-06 10:00:00')
+ self.assertIn('instance_usage_audit_log', result)
+ logs = result['instance_usage_audit_log']
+ self.assertEqual(57, logs['total_instances'])
+ self.assertEqual(0, logs['total_errors'])
+ self.assertEqual(4, len(logs['log']))
+ self.assertEqual(4, logs['num_hosts'])
+ self.assertEqual(3, logs['num_hosts_done'])
+ self.assertEqual(1, logs['num_hosts_running'])
+ self.assertEqual(0, logs['num_hosts_not_run'])
+ self.assertEqual("3 of 4 hosts done. 0 errors.",
+ logs['overall_status'])
+
+ def test_show_with_errors(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-instance_usage_audit_log/show',
+ use_admin_context=True)
+ result = self.controller.show(req, '2012-07-07 10:00:00')
+ self.assertIn('instance_usage_audit_log', result)
+ logs = result['instance_usage_audit_log']
+ self.assertEqual(57, logs['total_instances'])
+ self.assertEqual(3, logs['total_errors'])
+ self.assertEqual(4, len(logs['log']))
+ self.assertEqual(4, logs['num_hosts'])
+ self.assertEqual(4, logs['num_hosts_done'])
+ self.assertEqual(0, logs['num_hosts_running'])
+ self.assertEqual(0, logs['num_hosts_not_run'])
+ self.assertEqual("ALL hosts done. 3 errors.",
+ logs['overall_status'])
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py
new file mode 100644
index 0000000000..6a6c6f0736
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py
@@ -0,0 +1,497 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import keypairs as keypairs_v2
+from nova.api.openstack.compute.plugins.v3 import keypairs as keypairs_v21
+from nova.api.openstack import wsgi
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_keypair
+
+
+QUOTAS = quota.QUOTAS
+
+
+keypair_data = {
+ 'public_key': 'FAKE_KEY',
+ 'fingerprint': 'FAKE_FINGERPRINT',
+}
+
+
+def fake_keypair(name):
+ return dict(test_keypair.fake_keypair,
+ name=name, **keypair_data)
+
+
+def db_key_pair_get_all_by_user(self, user_id):
+ return [fake_keypair('FAKE')]
+
+
+def db_key_pair_create(self, keypair):
+ return fake_keypair(name=keypair['name'])
+
+
+def db_key_pair_destroy(context, user_id, name):
+ if not (user_id and name):
+ raise Exception()
+
+
+def db_key_pair_create_duplicate(context, keypair):
+ raise exception.KeyPairExists(key_name=keypair.get('name', ''))
+
+
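+# The tests below rely on the module-level fakes above, which replace the
+# nova.db keypair calls so the API layer can be exercised without a database;
+# for example, key_pair_get_all_by_user always returns a single keypair named
+# 'FAKE', which the list tests assert against.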
+class KeypairsTestV21(test.TestCase):
+ base_url = '/v2/fake'
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app_v21(init_only=('os-keypairs', 'servers'))
+ self.app_server = self.app
+
+ def setUp(self):
+ super(KeypairsTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(db, "key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stubs.Set(db, "key_pair_create",
+ db_key_pair_create)
+ self.stubs.Set(db, "key_pair_destroy",
+ db_key_pair_destroy)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Keypairs'])
+ self._setup_app()
+
+ def test_keypair_list(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
+ self.assertEqual(res_dict, response)
+
+ def test_keypair_create(self):
+ body = {'keypair': {'name': 'create_test'}}
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
+ self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
+
+ def _test_keypair_create_bad_request_case(self, body):
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_keypair_create_with_empty_name(self):
+ body = {'keypair': {'name': ''}}
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_name_too_long(self):
+ body = {
+ 'keypair': {
+ 'name': 'a' * 256
+ }
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_non_alphanumeric_name(self):
+ body = {
+ 'keypair': {
+ 'name': 'test/keypair'
+ }
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_import_bad_key(self):
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-what negative',
+ },
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_invalid_keypair_body(self):
+ body = {'alpha': {'name': 'create_test'}}
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_import(self):
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+        # FIXME(ja): should we check that public_key was sent to create?
+ res_dict = jsonutils.loads(res.body)
+ self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
+ self.assertNotIn('private_key', res_dict['keypair'])
+
+ def test_keypair_import_quota_limit(self):
+
+ def fake_quotas_count(self, context, resource, *args, **kwargs):
+ return 100
+
+ self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 403)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Quota exceeded, too many key pairs.",
+ res_dict['forbidden']['message'])
+
+ def test_keypair_create_quota_limit(self):
+
+ def fake_quotas_count(self, context, resource, *args, **kwargs):
+ return 100
+
+ self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 403)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Quota exceeded, too many key pairs.",
+ res_dict['forbidden']['message'])
+
+ def test_keypair_create_duplicate(self):
+ self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+ body = {'keypair': {'name': 'create_duplicate'}}
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Key pair 'create_duplicate' already exists.",
+ res_dict['conflictingRequest']['message'])
+
+ def test_keypair_delete(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 202)
+
+ def test_keypair_get_keypair_not_found(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs/DOESNOTEXIST')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_keypair_delete_not_found(self):
+
+ def db_key_pair_get_not_found(context, user_id, name):
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get",
+ db_key_pair_get_not_found)
+ req = webob.Request.blank(self.base_url + '/os-keypairs/WHAT')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_keypair_show(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ name='foo', public_key='XXX', fingerprint='YYY')
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual('foo', res_dict['keypair']['name'])
+ self.assertEqual('XXX', res_dict['keypair']['public_key'])
+ self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
+
+ def test_keypair_show_not_found(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_show_server(self):
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + '/servers/1')
+ req.headers['Content-Type'] = 'application/json'
+ response = req.get_response(self.app_server)
+ self.assertEqual(response.status_int, 200)
+ res_dict = jsonutils.loads(response.body)
+ self.assertIn('key_name', res_dict['server'])
+ self.assertEqual(res_dict['server']['key_name'], '')
+
+ def test_detail_servers(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fakes.fake_instance_get_all_by_filters())
+ req = fakes.HTTPRequest.blank(self.base_url + '/servers/detail')
+ res = req.get_response(self.app_server)
+ server_dicts = jsonutils.loads(res.body)['servers']
+ self.assertEqual(len(server_dicts), 5)
+
+ for server_dict in server_dicts:
+ self.assertIn('key_name', server_dict)
+ self.assertEqual(server_dict['key_name'], '')
+
+
+class KeypairPolicyTestV21(test.TestCase):
+ KeyPairController = keypairs_v21.KeypairController()
+ policy_path = 'compute_extension:v3:os-keypairs'
+ base_url = '/v2/fake'
+
+ def setUp(self):
+ super(KeypairPolicyTestV21, self).setUp()
+
+ def _db_key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ name='foo', public_key='XXX', fingerprint='YYY')
+
+ self.stubs.Set(db, "key_pair_get",
+ _db_key_pair_get)
+ self.stubs.Set(db, "key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stubs.Set(db, "key_pair_create",
+ db_key_pair_create)
+ self.stubs.Set(db, "key_pair_destroy",
+ db_key_pair_destroy)
+
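+    # Each test below overrides one policy rule for a single controller
+    # action and checks that the call raises Forbidden when the rule
+    # requires the admin role, or succeeds when the rule is empty.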
+ def test_keypair_list_fail_policy(self):
+ rules = {self.policy_path + ':index':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.index,
+ req)
+
+ def test_keypair_list_pass_policy(self):
+ rules = {self.policy_path + ':index':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ res = self.KeyPairController.index(req)
+ self.assertIn('keypairs', res)
+
+ def test_keypair_show_fail_policy(self):
+ rules = {self.policy_path + ':show':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.show,
+ req, 'FAKE')
+
+ def test_keypair_show_pass_policy(self):
+ rules = {self.policy_path + ':show':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ res = self.KeyPairController.show(req, 'FAKE')
+ self.assertIn('keypair', res)
+
+ def test_keypair_create_fail_policy(self):
+ body = {'keypair': {'name': 'create_test'}}
+ rules = {self.policy_path + ':create':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.create,
+ req, body=body)
+
+ def test_keypair_create_pass_policy(self):
+ body = {'keypair': {'name': 'create_test'}}
+ rules = {self.policy_path + ':create':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ res = self.KeyPairController.create(req, body=body)
+ self.assertIn('keypair', res)
+
+ def test_keypair_delete_fail_policy(self):
+ rules = {self.policy_path + ':delete':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.delete,
+ req, 'FAKE')
+
+ def test_keypair_delete_pass_policy(self):
+ rules = {self.policy_path + ':delete':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ res = self.KeyPairController.delete(req, 'FAKE')
+
+        # NOTE: on v2.1 the HTTP status code is set as the API method's
+        # wsgi_code attribute instead of status_int on a response object.
+ if isinstance(self.KeyPairController, keypairs_v21.KeypairController):
+ status_int = self.KeyPairController.delete.wsgi_code
+ else:
+ status_int = res.status_int
+ self.assertEqual(202, status_int)
+
+
+class KeypairsXMLSerializerTest(test.TestCase):
+ def setUp(self):
+ super(KeypairsXMLSerializerTest, self).setUp()
+ self.deserializer = wsgi.XMLDeserializer()
+
+ def test_default_serializer(self):
+ exemplar = dict(keypair=dict(
+ public_key='fake_public_key',
+ private_key='fake_private_key',
+ fingerprint='fake_fingerprint',
+ user_id='fake_user_id',
+ name='fake_key_name'))
+ serializer = keypairs_v2.KeypairTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('keypair', tree.tag)
+ for child in tree:
+ self.assertIn(child.tag, exemplar['keypair'])
+ self.assertEqual(child.text, exemplar['keypair'][child.tag])
+
+ def test_index_serializer(self):
+ exemplar = dict(keypairs=[
+ dict(keypair=dict(
+ name='key1_name',
+ public_key='key1_key',
+ fingerprint='key1_fingerprint')),
+ dict(keypair=dict(
+ name='key2_name',
+ public_key='key2_key',
+ fingerprint='key2_fingerprint'))])
+ serializer = keypairs_v2.KeypairsTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('keypairs', tree.tag)
+ self.assertEqual(len(exemplar['keypairs']), len(tree))
+ for idx, keypair in enumerate(tree):
+ self.assertEqual('keypair', keypair.tag)
+ kp_data = exemplar['keypairs'][idx]['keypair']
+ for child in keypair:
+ self.assertIn(child.tag, kp_data)
+ self.assertEqual(child.text, kp_data[child.tag])
+
+ def test_deserializer(self):
+ exemplar = dict(keypair=dict(
+ name='key_name',
+ public_key='public_key'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<keypair><name>key_name</name>'
+ '<public_key>public_key</public_key></keypair>')
+
+ result = self.deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
+
+
+class KeypairsTestV2(KeypairsTestV21):
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app(init_only=('os-keypairs',))
+ self.app_server = fakes.wsgi_app(init_only=('servers',))
+
+
+class KeypairPolicyTestV2(KeypairPolicyTestV21):
+ KeyPairController = keypairs_v2.KeypairController()
+ policy_path = 'compute_extension:keypairs'
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py
new file mode 100644
index 0000000000..069b688837
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py
@@ -0,0 +1,231 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import migrate_server
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class MigrateServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(MigrateServerTests, self).setUp()
+ self.controller = migrate_server.MigrateServerController()
+ self.compute_api = self.controller.compute_api
+
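+        # Have the wsgi app build its routes around this exact controller
+        # instance, so the mox stubs placed on self.compute_api are the ones
+        # hit by the requests made in the tests.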
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(migrate_server, 'MigrateServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-migrate-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_migrate(self):
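+        # The common helper translates each API action into a compute API
+        # call: 'migrate' maps to compute_api.resize() and 'os-migrateLive'
+        # to compute_api.live_migrate(), called with the positional
+        # (block_migration, disk_over_commit, host) arguments from args_map.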
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
+ method_translations=method_translations,
+ args_map=args_map)
+
+ def test_migrate_none_hostname(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': None,
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, None), {})}
+ self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
+ method_translations=method_translations,
+ args_map=args_map)
+
+ def test_migrate_with_non_existed_instance(self):
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ self._test_actions_with_non_existed_instance(
+ ['migrate', 'os-migrateLive'], body_map=body_map)
+
+ def test_migrate_raise_conflict_on_invalid_state(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions_raise_conflict_on_invalid_state(
+ ['migrate', 'os-migrateLive'], body_map=body_map,
+ args_map=args_map, method_translations=method_translations)
+
+ def test_actions_with_locked_instance(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions_with_locked_instance(
+ ['migrate', 'os-migrateLive'], body_map=body_map,
+ args_map=args_map, method_translations=method_translations)
+
+ def _test_migrate_exception(self, exc_info, expected_result):
+ self.mox.StubOutWithMock(self.compute_api, 'resize')
+ instance = self._stub_instance_get()
+ self.compute_api.resize(self.context, instance).AndRaise(exc_info)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'migrate': None})
+ self.assertEqual(expected_result, res.status_int)
+
+ def test_migrate_too_many_instances(self):
+ exc_info = exception.TooManyInstances(overs='', req='', used=0,
+ allowed=0, resource='')
+ self._test_migrate_exception(exc_info, 403)
+
+ def _test_migrate_live_succeeded(self, param):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+ instance = self._stub_instance_get()
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname')
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive': param})
+ self.assertEqual(202, res.status_int)
+
+ def test_migrate_live_enabled(self):
+ param = {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_enabled_with_string_param(self):
+ param = {'host': 'hostname',
+ 'block_migration': "False",
+ 'disk_over_commit': "False"}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_without_host(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_without_block_migration(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_without_disk_over_commit(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_block_migration(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': "foo",
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_disk_over_commit(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': "foo"}})
+ self.assertEqual(400, res.status_int)
+
+ def _test_migrate_live_failed_with_exception(self, fake_exc,
+ uuid=None):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+
+ instance = self._stub_instance_get(uuid=uuid)
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname').AndRaise(fake_exc)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+ self.assertIn(unicode(fake_exc), res.body)
+
+ def test_migrate_live_compute_service_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.ComputeServiceUnavailable(host='host'))
+
+ def test_migrate_live_invalid_hypervisor_type(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidHypervisorType())
+
+ def test_migrate_live_invalid_cpu_info(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidCPUInfo(reason=""))
+
+ def test_migrate_live_unable_to_migrate_to_self(self):
+ uuid = uuidutils.generate_uuid()
+ self._test_migrate_live_failed_with_exception(
+ exception.UnableToMigrateToSelf(instance_id=uuid,
+ host='host'),
+ uuid=uuid)
+
+ def test_migrate_live_destination_hypervisor_too_old(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.DestinationHypervisorTooOld())
+
+ def test_migrate_live_no_valid_host(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.NoValidHost(reason=''))
+
+ def test_migrate_live_invalid_local_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidLocalStorage(path='', reason=''))
+
+ def test_migrate_live_invalid_shared_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidSharedStorage(path='', reason=''))
+
+ def test_migrate_live_hypervisor_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.HypervisorUnavailable(host=""))
+
+ def test_migrate_live_instance_not_running(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InstanceNotRunning(instance_id=""))
+
+ def test_migrate_live_pre_check_error(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.MigrationPreCheckError(reason=''))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_migrations.py b/nova/tests/unit/api/openstack/compute/contrib/test_migrations.py
new file mode 100644
index 0000000000..ac18576389
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_migrations.py
@@ -0,0 +1,139 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+
+from nova.api.openstack.compute.contrib import migrations
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base
+from nova.openstack.common.fixture import moxstubout
+from nova import test
+
+fake_migrations = [
+ {
+ 'id': 1234,
+ 'source_node': 'node1',
+ 'dest_node': 'node2',
+ 'source_compute': 'compute1',
+ 'dest_compute': 'compute2',
+ 'dest_host': '1.2.3.4',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_123',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ },
+ {
+ 'id': 5678,
+ 'source_node': 'node10',
+ 'dest_node': 'node20',
+ 'source_compute': 'compute10',
+ 'dest_compute': 'compute20',
+ 'dest_host': '5.6.7.8',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_456',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 6,
+ 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ }
+]
+
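+# obj_make_list wraps the fake migration dicts above in a MigrationList of
+# Migration objects, the same object the stubbed compute_api.get_migrations()
+# returns in test_index below.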
+migrations_obj = base.obj_make_list(
+ 'fake-context',
+ objects.MigrationList(),
+ objects.Migration,
+ fake_migrations)
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class MigrationsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ """Run before each test."""
+ super(MigrationsTestCase, self).setUp()
+ self.controller = migrations.MigrationsController()
+ self.context = context.get_admin_context()
+ self.req = FakeRequest()
+ self.req.environ['nova.context'] = self.context
+ mox_fixture = self.useFixture(moxstubout.MoxStubout())
+ self.mox = mox_fixture.mox
+
+ def test_index(self):
+ migrations_in_progress = {
+ 'migrations': migrations.output(migrations_obj)}
+
+ for mig in migrations_in_progress['migrations']:
+ self.assertIn('id', mig)
+ self.assertNotIn('deleted', mig)
+ self.assertNotIn('deleted_at', mig)
+
+ filters = {'host': 'host1', 'status': 'migrating',
+ 'cell_name': 'ChildCell'}
+ self.req.GET = filters
+ self.mox.StubOutWithMock(self.controller.compute_api,
+ "get_migrations")
+
+ self.controller.compute_api.get_migrations(
+ self.context, filters).AndReturn(migrations_obj)
+ self.mox.ReplayAll()
+
+ response = self.controller.index(self.req)
+ self.assertEqual(migrations_in_progress, response)
+
+ def test_index_needs_authorization(self):
+ user_context = context.RequestContext(user_id=None,
+ project_id=None,
+ is_admin=False,
+ read_deleted="no",
+ overwrite=False)
+ self.req.environ['nova.context'] = user_context
+
+ self.assertRaises(exception.PolicyNotAuthorized, self.controller.index,
+ self.req)
+
+
+class MigrationsTemplateTest(test.NoDBTestCase):
+ def setUp(self):
+ super(MigrationsTemplateTest, self).setUp()
+ self.serializer = migrations.MigrationsTemplate()
+
+ def test_index_serialization(self):
+ migrations_out = migrations.output(migrations_obj)
+ res_xml = self.serializer.serialize(
+ {'migrations': migrations_out})
+
+ tree = etree.XML(res_xml)
+ children = tree.findall('migration')
+ self.assertEqual(tree.tag, 'migrations')
+ self.assertEqual(2, len(children))
+
+ for idx, child in enumerate(children):
+ self.assertEqual(child.tag, 'migration')
+ migration = migrations_out[idx]
+ for attr in migration.keys():
+ self.assertEqual(str(migration[attr]),
+ child.get(attr))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py b/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py
new file mode 100644
index 0000000000..dcf1dd299f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py
@@ -0,0 +1,204 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova import compute
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
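+# The compute API fakes below record the arguments of their most recent call
+# in these globals so each test can assert exactly what the extension passed
+# through.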
+last_add_fixed_ip = (None, None)
+last_remove_fixed_ip = (None, None)
+
+
+def compute_api_add_fixed_ip(self, context, instance, network_id):
+ global last_add_fixed_ip
+
+ last_add_fixed_ip = (instance['uuid'], network_id)
+
+
+def compute_api_remove_fixed_ip(self, context, instance, address):
+ global last_remove_fixed_ip
+
+ last_remove_fixed_ip = (instance['uuid'], address)
+
+
+def compute_api_get(self, context, instance_id, want_objects=False,
+ expected_attrs=None):
+ instance = objects.Instance()
+ instance.uuid = instance_id
+ instance.id = 1
+ instance.vm_state = 'fake'
+ instance.task_state = 'fake'
+ instance.obj_reset_changes()
+ return instance
+
+
+class FixedIpTestV21(test.NoDBTestCase):
+ def setUp(self):
+ super(FixedIpTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ self.stubs.Set(compute.api.API, "add_fixed_ip",
+ compute_api_add_fixed_ip)
+ self.stubs.Set(compute.api.API, "remove_fixed_ip",
+ compute_api_remove_fixed_ip)
+ self.stubs.Set(compute.api.API, 'get', compute_api_get)
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers', 'os-multinic'))
+
+ def _get_url(self):
+ return '/v2/fake'
+
+ def test_add_fixed_ip(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))
+
+ def _test_add_fixed_ip_bad_request(self, body):
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+ def test_add_fixed_ip_empty_network_id(self):
+ body = {'addFixedIp': {'network_id': ''}}
+ self._test_add_fixed_ip_bad_request(body)
+
+ def test_add_fixed_ip_network_id_bigger_than_36(self):
+ body = {'addFixedIp': {'network_id': 'a' * 37}}
+ self._test_add_fixed_ip_bad_request(body)
+
+ def test_add_fixed_ip_no_network(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict())
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(last_add_fixed_ip, (None, None))
+
+ @mock.patch.object(compute.api.API, 'add_fixed_ip')
+ def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip):
+ mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps(net='netid')
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+
+ def test_remove_fixed_ip(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict(address='10.10.10.1'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))
+
+ def test_remove_fixed_ip_no_address(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict())
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(last_remove_fixed_ip, (None, None))
+
+ def test_remove_fixed_ip_invalid_address(self):
+ body = {'remove_fixed_ip': {'address': ''}}
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+ @mock.patch.object(compute.api.API, 'remove_fixed_ip',
+ side_effect=exception.FixedIpNotFoundForSpecificInstance(
+ instance_uuid=UUID, ip='10.10.10.1'))
+ def test_remove_fixed_ip_not_found(self, _remove_fixed_ip):
+
+ body = {'remove_fixed_ip': {'address': '10.10.10.1'}}
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+
+class FixedIpTestV2(FixedIpTestV21):
+ def setUp(self):
+ super(FixedIpTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Multinic'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',))
+
+ def test_remove_fixed_ip_invalid_address(self):
+        # NOTE(cyeoh): This test is disabled for the V2 API because it has
+        # poorer input validation.
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_networks.py b/nova/tests/unit/api/openstack/compute/contrib/test_networks.py
new file mode 100644
index 0000000000..5636a06d0d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_networks.py
@@ -0,0 +1,610 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import math
+import uuid
+
+import iso8601
+import mock
+import netaddr
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
+from nova.api.openstack.compute.plugins.v3 import networks as networks_v21
+from nova.api.openstack.compute.plugins.v3 import networks_associate as \
+ networks_associate_v21
+from nova.api.openstack import extensions
+import nova.context
+from nova import exception
+from nova.network import manager
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.utils
+
+CONF = cfg.CONF
+
+UTC = iso8601.iso8601.Utc()
+FAKE_NETWORKS = [
+ {
+ 'bridge': 'br100', 'vpn_public_port': 1000,
+ 'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
+ 'updated_at': datetime.datetime(2011, 8, 16, 9, 26, 13, 48257,
+ tzinfo=UTC),
+ 'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
+ 'cidr_v6': None, 'deleted_at': None,
+ 'gateway': '10.0.0.1', 'label': 'mynet_0',
+ 'project_id': '1234', 'rxtx_base': None,
+ 'vpn_private_address': '10.0.0.2', 'deleted': False,
+ 'vlan': 100, 'broadcast': '10.0.0.7',
+ 'netmask': '255.255.255.248', 'injected': False,
+ 'cidr': '10.0.0.0/29',
+ 'vpn_public_address': '127.0.0.1', 'multi_host': False,
+ 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
+ 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
+ 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525,
+ tzinfo=UTC),
+ 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True,
+ 'share_address': False,
+ },
+ {
+ 'bridge': 'br101', 'vpn_public_port': 1001,
+ 'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
+ 'updated_at': None, 'id': 2, 'cidr_v6': None,
+ 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
+ 'deleted_at': None, 'gateway': '10.0.0.9',
+ 'label': 'mynet_1', 'project_id': None,
+ 'vpn_private_address': '10.0.0.10', 'deleted': False,
+ 'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
+ 'netmask': '255.255.255.248', 'injected': False,
+ 'cidr': '10.0.0.10/29', 'vpn_public_address': None,
+ 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
+ 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
+ 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495,
+ tzinfo=UTC),
+ 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True,
+ 'share_address': False,
+ },
+]
+
+
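+# FAKE_USER_NETWORKS holds the same two networks reduced to the fields a
+# non-admin caller may see; the user-facing tests compare against this list,
+# while the admin tests compare against FAKE_NETWORKS.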
+FAKE_USER_NETWORKS = [
+ {
+ 'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
+ 'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
+ 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0',
+ 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
+ },
+ {
+ 'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
+ 'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
+ 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1',
+ 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
+ },
+]
+
+NEW_NETWORK = {
+ "network": {
+ "bridge_interface": "eth0",
+ "cidr": "10.20.105.0/24",
+ "label": "new net 111",
+ "vlan_start": 111,
+ "injected": False,
+ "multi_host": False,
+ 'mtu': None,
+ 'dhcp_server': '10.0.0.1',
+ 'enable_dhcp': True,
+ 'share_address': False,
+ }
+}
+
+
+class FakeNetworkAPI(object):
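+    # In-memory stand-in for the nova network API: it operates on a mutable
+    # copy of FAKE_NETWORKS and raises the same exceptions the real API
+    # would (NetworkNotFound, NetworkNotFoundForUUID, NetworkInUse).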
+
+ _sentinel = object()
+ _vlan_is_disabled = False
+
+ def __init__(self):
+ self.networks = copy.deepcopy(FAKE_NETWORKS)
+
+ def disable_vlan(self):
+ self._vlan_is_disabled = True
+
+ def delete(self, context, network_id):
+ if network_id == 'always_delete':
+ return True
+ if network_id == -1:
+ raise exception.NetworkInUse(network_id=network_id)
+ for i, network in enumerate(self.networks):
+ if network['id'] == network_id:
+ del self.networks[0]
+ return True
+ raise exception.NetworkNotFoundForUUID(uuid=network_id)
+
+ def disassociate(self, context, network_uuid):
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ network['project_id'] = None
+ return True
+ raise exception.NetworkNotFound(network_id=network_uuid)
+
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
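+        # _sentinel distinguishes "argument not supplied" from an explicit
+        # None, since None is a meaningful value here (it clears the host
+        # or project association).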
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ if host is not FakeNetworkAPI._sentinel:
+ network['host'] = host
+ if project is not FakeNetworkAPI._sentinel:
+ network['project_id'] = project
+ return True
+ raise exception.NetworkNotFound(network_id=network_uuid)
+
+ def add_network_to_project(self, context,
+ project_id, network_uuid=None):
+ if self._vlan_is_disabled:
+ raise NotImplementedError()
+ if network_uuid:
+ for network in self.networks:
+ if network.get('project_id', None) is None:
+ network['project_id'] = project_id
+ return
+ return
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ network['project_id'] = project_id
+ return
+
+ def get_all(self, context):
+ return self._fake_db_network_get_all(context, project_only=True)
+
+ def _fake_db_network_get_all(self, context, project_only="allow_none"):
+ project_id = context.project_id
+ nets = self.networks
+ if nova.context.is_user_context(context) and project_only:
+ if project_only == 'allow_none':
+ nets = [n for n in self.networks
+ if (n['project_id'] == project_id or
+ n['project_id'] is None)]
+ else:
+ nets = [n for n in self.networks
+ if n['project_id'] == project_id]
+ objs = [objects.Network._from_db_object(context,
+ objects.Network(),
+ net)
+ for net in nets]
+ return objects.NetworkList(objects=objs)
+
+ def get(self, context, network_id):
+ for network in self.networks:
+ if network.get('uuid') == network_id:
+ return objects.Network._from_db_object(context,
+ objects.Network(),
+ network)
+ raise exception.NetworkNotFound(network_id=network_id)
+
+ def create(self, context, **kwargs):
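+        # Carve the requested CIDR into `num_networks` subnets, each sized
+        # for `network_size` addresses: the prefix length is 32 minus
+        # ceil(log2(network_size)).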
+ subnet_bits = int(math.ceil(math.log(kwargs.get(
+ 'network_size', CONF.network_size), 2)))
+ fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
+ prefixlen_v4 = 32 - subnet_bits
+ subnets_v4 = list(fixed_net_v4.subnet(
+ prefixlen_v4,
+ count=kwargs.get('num_networks', CONF.num_networks)))
+ new_networks = []
+ new_id = max((net['id'] for net in self.networks))
+ for index, subnet_v4 in enumerate(subnets_v4):
+ new_id += 1
+ net = {'id': new_id, 'uuid': str(uuid.uuid4())}
+
+ net['cidr'] = str(subnet_v4)
+ net['netmask'] = str(subnet_v4.netmask)
+ net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
+ net['broadcast'] = str(subnet_v4.broadcast)
+ net['dhcp_start'] = str(subnet_v4[2])
+
+ for key in FAKE_NETWORKS[0].iterkeys():
+ net.setdefault(key, kwargs.get(key))
+ new_networks.append(net)
+ self.networks += new_networks
+ return new_networks
+
+
+# NOTE(vish): tests that network create Exceptions actually return
+# the proper error responses
+class NetworkCreateExceptionsTestV21(test.TestCase):
+ url_prefix = '/v2/1234'
+
+ class PassthroughAPI(object):
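+        # Pass create() through to a real FlatDHCPManager so the manager's
+        # own validation errors reach the controller; the special
+        # 'fail_NetworkNotCreated' label forces a NetworkNotCreated error.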
+ def __init__(self):
+ self.network_manager = manager.FlatDHCPManager()
+
+ def create(self, *args, **kwargs):
+ if kwargs['label'] == 'fail_NetworkNotCreated':
+ raise exception.NetworkNotCreated(req='fake_fail')
+ return self.network_manager.create_networks(*args, **kwargs)
+
+ def setUp(self):
+ super(NetworkCreateExceptionsTestV21, self).setUp()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks_v21.NetworkController(self.PassthroughAPI())
+
+ def test_network_create_bad_vlan(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['vlan_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_no_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_fixed_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['fixed_cidr'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_start(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['allowed_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_handle_network_not_created(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['label'] = 'fail_NetworkNotCreated'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_cidr_conflict(self):
+
+ @staticmethod
+ def get_all(context):
+ ret = objects.NetworkList(context=context, objects=[])
+ net = objects.Network(cidr='10.0.0.0/23')
+ ret.objects.append(net)
+ return ret
+
+ self.stubs.Set(objects.NetworkList, 'get_all', get_all)
+
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '10.0.0.0/24'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create, req, net)
+
+
+class NetworkCreateExceptionsTestV2(NetworkCreateExceptionsTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+
+ self.controller = networks.NetworkController(
+ self.PassthroughAPI(), ext_mgr)
+
+
+class NetworksTestV21(test.NoDBTestCase):
+ url_prefix = '/v2/1234'
+
+ def setUp(self):
+ super(NetworksTestV21, self).setUp()
+ self.fake_network_api = FakeNetworkAPI()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks_v21.NetworkController(
+ self.fake_network_api)
+
+ def _check_status(self, res, method, code):
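+        # On v2.1 the HTTP status code is exposed as the controller method's
+        # wsgi_code attribute rather than on a response object; the V2
+        # subclass below overrides this to check res.status_int instead.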
+        self.assertEqual(method.wsgi_code, code)
+
+ @staticmethod
+ def network_uuid_to_id(network):
+ network['id'] = network['uuid']
+ del network['uuid']
+
+ def test_network_list_all_as_user(self):
+ self.maxDiff = None
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, {'networks': []})
+
+ project_id = req.environ["nova.context"].project_id
+ cxt = req.environ["nova.context"]
+ uuid = FAKE_NETWORKS[0]['uuid']
+ self.fake_network_api.associate(context=cxt,
+ network_uuid=uuid,
+ project=project_id)
+ res_dict = self.controller.index(req)
+ expected = [copy.deepcopy(FAKE_USER_NETWORKS[0])]
+ for network in expected:
+ self.network_uuid_to_id(network)
+ self.assertEqual({'networks': expected}, res_dict)
+
+ def test_network_list_all_as_admin(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ expected = copy.deepcopy(FAKE_NETWORKS)
+ for network in expected:
+ self.network_uuid_to_id(network)
+ self.assertEqual({'networks': expected}, res_dict)
+
+ def test_network_disassociate(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s/action' % uuid)
+ res = self.controller._disassociate_host_and_project(
+ req, uuid, {'disassociate': None})
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+ self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_disassociate_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/100/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._disassociate_host_and_project,
+ req, 100, {'disassociate': None})
+
+ def test_network_get_as_user(self):
+ uuid = FAKE_USER_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
+ self.network_uuid_to_id(expected['network'])
+ self.assertEqual(expected, res_dict)
+
+ def test_network_get_as_admin(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
+ self.network_uuid_to_id(expected['network'])
+ self.assertEqual(expected, res_dict)
+
+ def test_network_get_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 100)
+
+ def test_network_delete(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res = self.controller.delete(req, 1)
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+
+ def test_network_delete_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, 100)
+
+ def test_network_delete_in_use(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/-1')
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.delete, req, -1)
+
+ def test_network_add(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ res = self.controller.add(req, {'id': uuid})
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['project_id'], 'fake')
+
+ @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.'
+ 'FakeNetworkAPI.add_network_to_project',
+ side_effect=exception.NoMoreNetworks)
+ def test_network_add_no_more_networks_fail(self, mock_add):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
+ {'id': uuid})
+
+ @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.'
+ 'FakeNetworkAPI.add_network_to_project',
+ side_effect=exception.NetworkNotFoundForUUID(uuid='fake_uuid'))
+ def test_network_add_network_not_found_networks_fail(self, mock_add):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
+ {'id': uuid})
+
+ def test_network_create(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ res_dict = self.controller.create(req, NEW_NETWORK)
+ self.assertIn('network', res_dict)
+ uuid = res_dict['network']['id']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ self.assertTrue(res_dict['network']['label'].
+ startswith(NEW_NETWORK['network']['label']))
+
+ def test_network_create_large(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ large_network = copy.deepcopy(NEW_NETWORK)
+ large_network['network']['cidr'] = '128.0.0.0/4'
+ res_dict = self.controller.create(req, large_network)
+ self.assertEqual(res_dict['network']['cidr'],
+ large_network['network']['cidr'])
+
+ def test_network_create_bad_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '128.0.0.0/900'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_neutron_disassociate_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ controller = networks.NetworkController()
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ controller._disassociate_host_and_project,
+ req, uuid, {'disassociate': None})
+
+
+class NetworksTestV2(NetworksTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+ self.controller = networks.NetworkController(self.fake_network_api,
+ ext_mgr)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(res.status_int, 202)
+
+ def test_network_create_not_extended(self):
+ self.stubs.Set(self.controller, 'extended', False)
+ # NOTE(vish): Verify that new params are not passed through if
+ # extension is not enabled.
+
+ def no_mtu(*args, **kwargs):
+ if 'mtu' in kwargs:
+ raise test.TestingException("mtu should not pass through")
+ return [{}]
+
+ self.stubs.Set(self.controller.network_api, 'create', no_mtu)
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['mtu'] = 9000
+ self.controller.create(req, net)
+
+
+class NetworksAssociateTestV21(test.NoDBTestCase):
+
+ def setUp(self):
+ super(NetworksAssociateTestV21, self).setUp()
+ self.fake_network_api = FakeNetworkAPI()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks.NetworkController(self.fake_network_api)
+ self.associate_controller = networks_associate_v21\
+ .NetworkAssociateActionController(self.fake_network_api)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(method.wsgi_code, code)
+
+ def test_network_disassociate_host_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_host_only(
+ req, uuid, {'disassociate_host': None})
+ self._check_status(res,
+ self.associate_controller._disassociate_host_only,
+ 202)
+ self.assertIsNotNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_disassociate_project_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_project_only(
+ req, uuid, {'disassociate_project': None})
+ self._check_status(
+ res, self.associate_controller._disassociate_project_only, 202)
+ self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNotNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_associate_with_host(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._associate_host(
+ req, uuid, {'associate_host': "TestHost"})
+ self._check_status(res, self.associate_controller._associate_host, 202)
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['host'], 'TestHost')
+
+ def test_network_neutron_associate_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._associate_host,
+ req, uuid, {'associate_host': "TestHost"})
+
+ def test_network_neutron_disassociate_project_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._disassociate_project_only,
+ req, uuid, {'disassociate_project': None})
+
+ def test_network_neutron_disassociate_host_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._disassociate_host_only,
+ req, uuid, {'disassociate_host': None})
+
+
+class NetworksAssociateTestV2(NetworksAssociateTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+ self.controller = networks.NetworkController(
+ self.fake_network_api,
+ ext_mgr)
+ self.associate_controller = networks_associate\
+ .NetworkAssociateActionController(self.fake_network_api)
+
+ def _check_status(self, res, method, code):
+        self.assertEqual(res.status_int, code)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py
new file mode 100644
index 0000000000..704de21005
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py
@@ -0,0 +1,918 @@
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import uuid
+
+from lxml import etree
+import mock
+from neutronclient.common import exceptions as n_exc
+from neutronclient.neutron import v2_0 as neutronv20
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import security_groups
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import context
+import nova.db
+from nova import exception
+from nova.network import model
+from nova.network import neutronv2
+from nova.network.neutronv2 import api as neutron_api
+from nova.network.security_group import neutron_driver
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_security_groups
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNeutronSecurityGroupsTestCase(test.TestCase):
+ def setUp(self):
+ super(TestNeutronSecurityGroupsTestCase, self).setUp()
+ cfg.CONF.set_override('security_group_api', 'neutron')
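+        # Swap the real neutron client factory for the in-memory MockClient
+        # defined at the bottom of this module.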
+ self.original_client = neutronv2.get_client
+ neutronv2.get_client = get_client
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestNeutronSecurityGroupsTestCase, self).tearDown()
+
+
+class TestNeutronSecurityGroupsV21(
+ test_security_groups.TestSecurityGroupsV21,
+ TestNeutronSecurityGroupsTestCase):
+
+ def _create_sg_template(self, **kwargs):
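+        # Create a security group through the security groups controller;
+        # with security_group_api=neutron this goes through the mocked
+        # neutron client.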
+ sg = test_security_groups.security_group_template(**kwargs)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ return self.controller.create(req, {'security_group': sg})
+
+ def _create_network(self):
+ body = {'network': {'name': 'net1'}}
+ neutron = get_client()
+ net = neutron.create_network(body)
+ body = {'subnet': {'network_id': net['network']['id'],
+ 'cidr': '10.0.0.0/24'}}
+ neutron.create_subnet(body)
+ return net
+
+ def _create_port(self, **kwargs):
+ body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
+ fields = ['security_groups', 'device_id', 'network_id',
+ 'port_security_enabled']
+ for field in fields:
+ if field in kwargs:
+ body['port'][field] = kwargs[field]
+ neutron = get_client()
+ return neutron.create_port(body)
+
+ def _create_security_group(self, **kwargs):
+ body = {'security_group': {}}
+ fields = ['name', 'description']
+ for field in fields:
+ if field in kwargs:
+ body['security_group'][field] = kwargs[field]
+ neutron = get_client()
+ return neutron.create_security_group(body)
+
+ def test_create_security_group_with_no_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_empty_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_blank_name(self):
+ # Neutron's security group name field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_name(self):
+ # Neutron allows security group name to be whitespace.
+ pass
+
+ def test_create_security_group_with_blank_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_description(self):
+ # Neutron allows description to be whitespace.
+ pass
+
+ def test_create_security_group_with_duplicate_name(self):
+ # Neutron allows duplicate names for security groups.
+ pass
+
+ def test_create_security_group_non_string_name(self):
+        # Neutron allows the security group name to be a non-string value.
+ pass
+
+ def test_create_security_group_non_string_description(self):
+        # Neutron allows a non-string description.
+ pass
+
+ def test_create_security_group_quota_limit(self):
+ # Enforced by Neutron server.
+ pass
+
+ def test_update_security_group(self):
+ # Enforced by Neutron server.
+ pass
+
+ def test_get_security_group_list(self):
+ self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ list_dict = self.controller.index(req)
+ self.assertEqual(len(list_dict['security_groups']), 2)
+
+ def test_get_security_group_list_all_tenants(self):
+ pass
+
+ def test_get_security_group_by_instance(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+ expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
+ 'name': 'test', 'description': 'test-description'}]
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ test_security_groups.return_server_by_uuid)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
+ % test_security_groups.FAKE_UUID1)
+ res_dict = self.server_controller.index(
+ req, test_security_groups.FAKE_UUID1)['security_groups']
+ self.assertEqual(expected, res_dict)
+
+ def test_get_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ res_dict = self.controller.show(req, sg['id'])
+ expected = {'security_group': sg}
+ self.assertEqual(res_dict, expected)
+
+ def test_delete_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ sg['id'])
+ self.controller.delete(req, sg['id'])
+
+ def test_delete_security_group_by_admin(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ sg['id'], use_admin_context=True)
+ self.controller.delete(req, sg['id'])
+
+ def test_delete_security_group_in_use(self):
+ sg = self._create_sg_template().get('security_group')
+ self._create_network()
+ db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
+ _context = context.get_admin_context()
+ instance = instance_obj.Instance._from_db_object(
+ _context, instance_obj.Instance(), db_inst,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+ neutron = neutron_api.API()
+ with mock.patch.object(nova.db, 'instance_get_by_uuid',
+ return_value=db_inst):
+ neutron.allocate_for_instance(_context, instance,
+ security_groups=[sg['id']])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, sg['id'])
+
+ def test_associate_non_running_instance(self):
+        # Neutron does not care whether the instance is running or not. Once
+        # the instance is detected by neutron, the security group policy is
+        # pushed down to it.
+ pass
+
+ def test_associate_already_associated_security_group_to_instance(self):
+        # Neutron security groups do not raise an error if you update a
+        # port to add a security group that is already associated with
+        # the port, because PUT semantics are used.
+ pass
+
+ def test_associate(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_duplicate_names(self):
+ sg1 = self._create_security_group(name='sg1',
+ description='sg1')['security_group']
+ self._create_security_group(name='sg1',
+ description='sg1')['security_group']
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg1['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="sg1"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_port_security_enabled_true(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ port_security_enabled=True,
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_port_security_enabled_false(self):
+ self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], port_security_enabled=False,
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup,
+ req, '1', body)
+
+ def test_disassociate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(removeSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_running_instance(self):
+        # Neutron does not care whether the instance is running or not. Once
+        # the instance is detected by neutron, the security group policy is
+        # pushed down to it.
+ pass
+
+ def test_disassociate_already_associated_security_group_to_instance(self):
+        # Neutron security groups do not raise an error if you update a
+        # port to add a security group that is already associated with
+        # the port, because PUT semantics are used.
+ pass
+
+ def test_disassociate(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+ def test_get_raises_no_unique_match_error(self):
+
+ def fake_find_resourceid_by_name_or_id(client, param, name,
+ project_id=None):
+ raise n_exc.NeutronClientNoUniqueMatch()
+
+ self.stubs.Set(neutronv20, 'find_resourceid_by_name_or_id',
+ fake_find_resourceid_by_name_or_id)
+ security_group_api = self.controller.security_group_api
+ self.assertRaises(exception.NoUniqueMatch, security_group_api.get,
+ context.get_admin_context(), 'foobar')
+
+ def test_get_instances_security_groups_bindings(self):
+ servers = [{'id': test_security_groups.FAKE_UUID1},
+ {'id': test_security_groups.FAKE_UUID2}]
+ sg1 = self._create_sg_template(name='test1').get('security_group')
+ sg2 = self._create_sg_template(name='test2').get('security_group')
+ # test name='' is replaced with id
+ sg3 = self._create_sg_template(name='').get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg1['id'],
+ sg2['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg2['id'],
+ sg3['id']],
+ device_id=test_security_groups.FAKE_UUID2)
+ expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
+ {'name': sg2['name']}],
+ test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
+ {'name': sg3['id']}]}
+ security_group_api = self.controller.security_group_api
+ bindings = (
+ security_group_api.get_instances_security_groups_bindings(
+ context.get_admin_context(), servers))
+ self.assertEqual(bindings, expected)
+
+ def test_get_instance_security_groups(self):
+ sg1 = self._create_sg_template(name='test1').get('security_group')
+ sg2 = self._create_sg_template(name='test2').get('security_group')
+ # test name='' is replaced with id
+ sg3 = self._create_sg_template(name='').get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg1['id'],
+ sg2['id'],
+ sg3['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ expected = [{'name': sg1['name']}, {'name': sg2['name']},
+ {'name': sg3['id']}]
+ security_group_api = self.controller.security_group_api
+ sgs = security_group_api.get_instance_security_groups(
+ context.get_admin_context(), test_security_groups.FAKE_UUID1)
+ self.assertEqual(sgs, expected)
+
+ @mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
+ 'get_instances_security_groups_bindings')
+ def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
+ servers = [{'id': test_security_groups.FAKE_UUID1}]
+ neutron_sg_bind_mock.return_value = {}
+
+ security_group_api = self.controller.security_group_api
+ ctx = context.get_admin_context()
+ sgs = security_group_api.get_instance_security_groups(ctx,
+ test_security_groups.FAKE_UUID1)
+
+ neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
+ self.assertEqual([], sgs)
+
+ def test_create_port_with_sg_and_port_security_enabled_true(self):
+ sg1 = self._create_sg_template(name='test1').get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg1['id']],
+ port_security_enabled=True,
+ device_id=test_security_groups.FAKE_UUID1)
+ security_group_api = self.controller.security_group_api
+ sgs = security_group_api.get_instance_security_groups(
+ context.get_admin_context(), test_security_groups.FAKE_UUID1)
+ self.assertEqual(sgs, [{'name': 'test1'}])
+
+ def test_create_port_with_sg_and_port_security_enabled_false(self):
+ sg1 = self._create_sg_template(name='test1').get('security_group')
+ net = self._create_network()
+ self.assertRaises(exception.SecurityGroupCannotBeApplied,
+ self._create_port,
+ network_id=net['network']['id'],
+ security_groups=[sg1['id']],
+ port_security_enabled=False,
+ device_id=test_security_groups.FAKE_UUID1)
+
+
+class TestNeutronSecurityGroupsV2(TestNeutronSecurityGroupsV21):
+ controller_cls = security_groups.SecurityGroupController
+ server_secgrp_ctl_cls = security_groups.ServerSecurityGroupController
+ secgrp_act_ctl_cls = security_groups.SecurityGroupActionController
+
+
+class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
+ def setUp(self):
+ super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
+ id1 = '11111111-1111-1111-1111-111111111111'
+ sg_template1 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id1)
+ id2 = '22222222-2222-2222-2222-222222222222'
+ sg_template2 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id2)
+ self.controller_sg = security_groups.SecurityGroupController()
+ neutron = get_client()
+ neutron._fake_security_groups[id1] = sg_template1
+ neutron._fake_security_groups[id2] = sg_template2
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestNeutronSecurityGroupsTestCase, self).tearDown()
+
+
+class _TestNeutronSecurityGroupRulesBase(object):
+
+ def test_create_add_existing_rules_by_cidr(self):
+ sg = test_security_groups.security_group_template()
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller_sg.create(req, {'security_group': sg})
+ rule = test_security_groups.security_group_rule_template(
+ cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.controller.create(req, {'security_group_rule': rule})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_add_existing_rules_by_group_id(self):
+ sg = test_security_groups.security_group_template()
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller_sg.create(req, {'security_group': sg})
+ rule = test_security_groups.security_group_rule_template(
+ group=self.sg1['id'], parent_group_id=self.sg2['id'])
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.controller.create(req, {'security_group_rule': rule})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_delete(self):
+ rule = test_security_groups.security_group_rule_template(
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % security_group_rule['id'])
+ self.controller.delete(req, security_group_rule['id'])
+
+ def test_create_rule_quota_limit(self):
+ # Enforced by neutron
+ pass
+
+
+class TestNeutronSecurityGroupRulesV2(
+ _TestNeutronSecurityGroupRulesBase,
+ test_security_groups.TestSecurityGroupRulesV2,
+ TestNeutronSecurityGroupRulesTestCase):
+ pass
+
+
+class TestNeutronSecurityGroupRulesV21(
+ _TestNeutronSecurityGroupRulesBase,
+ test_security_groups.TestSecurityGroupRulesV21,
+ TestNeutronSecurityGroupRulesTestCase):
+ pass
+
+
+class TestNeutronSecurityGroupsXMLDeserializer(
+ test_security_groups.TestSecurityGroupXMLDeserializer,
+ TestNeutronSecurityGroupsTestCase):
+ pass
+
+
+class TestNeutronSecurityGroupsXMLSerializer(
+ test_security_groups.TestSecurityGroupXMLSerializer,
+ TestNeutronSecurityGroupsTestCase):
+ pass
+
+
+class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(TestNeutronSecurityGroupsOutputTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.controller = security_groups.SecurityGroupController()
+ self.stubs.Set(compute.api.API, 'get',
+ test_security_groups.fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all',
+ test_security_groups.fake_compute_get_all)
+ self.stubs.Set(compute.api.API, 'create',
+ test_security_groups.fake_compute_create)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instances_security_groups_bindings',
+ (test_security_groups.
+ fake_get_instances_security_groups_bindings))
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Security_groups'])
+
+ def _make_request(self, url, body=None):
+ req = webob.Request.blank(url)
+ if body:
+ req.method = 'POST'
+ req.body = self._encode_body(body)
+ req.content_type = self.content_type
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ def _encode_body(self, body):
+ return jsonutils.dumps(body)
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_groups(self, server):
+ return server.get('security_groups')
+
+ def test_create(self):
+ url = '/v2/fake/servers'
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+ for security_group in security_groups:
+ sg = test_security_groups.security_group_template(
+ name=security_group['name'])
+ self.controller.create(req, {'security_group': sg})
+
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+ security_groups=security_groups)
+ res = self._make_request(url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_create_server_get_default_security_group(self):
+ url = '/v2/fake/servers'
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ res = self._make_request(url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ group = self._get_groups(server)[0]
+ self.assertEqual(group.get('name'), 'default')
+
+ def test_show(self):
+ def fake_get_instance_security_groups(inst, context, id):
+ return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instance_security_groups',
+ fake_get_instance_security_groups)
+
+ url = '/v2/fake/servers'
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+ for security_group in security_groups:
+ sg = test_security_groups.security_group_template(
+ name=security_group['name'])
+ self.controller.create(req, {'security_group': sg})
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+ security_groups=security_groups)
+
+ res = self._make_request(url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ # Test that show (GET) returns the same information as create (POST)
+ url = '/v2/fake/servers/' + test_security_groups.UUID3
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ server = self._get_server(res.body)
+
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ for j, group in enumerate(self._get_groups(server)):
+ name = 'fake-%s-%s' % (i, j)
+ self.assertEqual(group.get('name'), name)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class TestNeutronSecurityGroupsOutputXMLTest(
+ TestNeutronSecurityGroupsOutputTest):
+
+ content_type = 'application/xml'
+
+ class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server', selector='server')
+ root.set('name')
+ root.set('id')
+ root.set('imageRef')
+ root.set('flavorRef')
+ elem = xmlutil.SubTemplateElement(root, 'security_groups')
+ sg = xmlutil.SubTemplateElement(elem, 'security_group',
+ selector='security_groups')
+ sg.set('name')
+ return xmlutil.MasterTemplate(root, 1,
+ nsmap={None: xmlutil.XMLNS_V11})
+
+ def _encode_body(self, body):
+ serializer = self.MinimalCreateServerTemplate()
+ return serializer.serialize(body)
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_groups(self, server):
+ # NOTE(vish): we are adding security groups without an extension
+ # namespace so we don't break people using the existing
+ # functionality, but that means we need to use find with
+ # the existing server namespace.
+ namespace = server.nsmap[None]
+ return server.find('{%s}security_groups' % namespace).getchildren()
+
+
+def get_client(context=None, admin=False):
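+    # Stand-in for neutronv2.get_client(); every call returns a MockClient,
+    # whose state lives in shared class-level dicts and therefore persists
+    # across calls.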
+ return MockClient()
+
+
+class MockClient(object):
+
+ # Needs to be global to survive multiple calls to get_client.
+ _fake_security_groups = {}
+ _fake_ports = {}
+ _fake_networks = {}
+ _fake_subnets = {}
+ _fake_security_group_rules = {}
+
+ def __init__(self):
+ # add default security group
+ if not len(self._fake_security_groups):
+ ret = {'name': 'default', 'description': 'default',
+ 'tenant_id': 'fake_tenant', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+ self._fake_security_groups[ret['id']] = ret
+
+ def _reset(self):
+ self._fake_security_groups.clear()
+ self._fake_ports.clear()
+ self._fake_networks.clear()
+ self._fake_subnets.clear()
+ self._fake_security_group_rules.clear()
+
+ def create_security_group(self, body=None):
+ s = body.get('security_group')
+ if len(s.get('name')) > 255 or len(s.get('description')) > 255:
+            msg = 'Security Group name or description greater than 255'
+ raise n_exc.NeutronClientException(message=msg, status_code=401)
+ ret = {'name': s.get('name'), 'description': s.get('description'),
+ 'tenant_id': 'fake', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+
+ self._fake_security_groups[ret['id']] = ret
+ return {'security_group': ret}
+
+ def create_network(self, body):
+ n = body.get('network')
+ ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
+ 'admin_state_up': n.get('admin_state_up', True),
+ 'tenant_id': 'fake_tenant',
+ 'id': str(uuid.uuid4())}
+ if 'port_security_enabled' in n:
+ ret['port_security_enabled'] = n['port_security_enabled']
+ self._fake_networks[ret['id']] = ret
+ return {'network': ret}
+
+ def create_subnet(self, body):
+ s = body.get('subnet')
+ try:
+ net = self._fake_networks[s.get('network_id')]
+ except KeyError:
+ msg = 'Network %s not found' % s.get('network_id')
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+ ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
+ 'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
+ 'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
+ net['subnets'].append(ret['id'])
+ self._fake_networks[net['id']] = net
+ self._fake_subnets[ret['id']] = ret
+ return {'subnet': ret}
+
+ def create_port(self, body):
+ p = body.get('port')
+ ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
+ 'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
+ 'device_id': p.get('device_id', str(uuid.uuid4())),
+ 'admin_state_up': p.get('admin_state_up', True),
+ 'security_groups': p.get('security_groups', []),
+ 'network_id': p.get('network_id'),
+ 'binding:vnic_type':
+ p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
+
+ network = self._fake_networks[p['network_id']]
+ if 'port_security_enabled' in p:
+ ret['port_security_enabled'] = p['port_security_enabled']
+ elif 'port_security_enabled' in network:
+ ret['port_security_enabled'] = network['port_security_enabled']
+
+ port_security = ret.get('port_security_enabled', True)
+ # port_security must be True if security groups are present
+ if not port_security and ret['security_groups']:
+ raise exception.SecurityGroupCannotBeApplied()
+
+ if network['subnets']:
+ ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
+ 'ip_address': '10.0.0.1'}]
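+        # Ports created with port security enabled (or unset) and without
+        # explicit security groups fall back to the 'default' group.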
+ if not ret['security_groups'] and (port_security is None or
+ port_security is True):
+ for security_group in self._fake_security_groups.values():
+ if security_group['name'] == 'default':
+ ret['security_groups'] = [security_group['id']]
+ break
+ self._fake_ports[ret['id']] = ret
+ return {'port': ret}
+
+ def create_security_group_rule(self, body):
+ # does not handle bulk case so just picks rule[0]
+ r = body.get('security_group_rules')[0]
+ fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
+ 'ethertype', 'remote_ip_prefix', 'tenant_id',
+ 'security_group_id', 'remote_group_id']
+ ret = {}
+ for field in fields:
+ ret[field] = r.get(field)
+ ret['id'] = str(uuid.uuid4())
+ self._fake_security_group_rules[ret['id']] = ret
+ return {'security_group_rules': [ret]}
+
+ def show_security_group(self, security_group, **_params):
+ try:
+ sg = self._fake_security_groups[security_group]
+ except KeyError:
+ msg = 'Security Group %s not found' % security_group
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+ for security_group_rule in self._fake_security_group_rules.values():
+ if security_group_rule['security_group_id'] == sg['id']:
+ sg['security_group_rules'].append(security_group_rule)
+
+ return {'security_group': sg}
+
+ def show_security_group_rule(self, security_group_rule, **_params):
+ try:
+ return {'security_group_rule':
+ self._fake_security_group_rules[security_group_rule]}
+ except KeyError:
+ msg = 'Security Group rule %s not found' % security_group_rule
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_network(self, network, **_params):
+ try:
+ return {'network':
+ self._fake_networks[network]}
+ except KeyError:
+ msg = 'Network %s not found' % network
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_port(self, port, **_params):
+ try:
+ return {'port':
+ self._fake_ports[port]}
+ except KeyError:
+ msg = 'Port %s not found' % port
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_subnet(self, subnet, **_params):
+ try:
+ return {'subnet':
+ self._fake_subnets[subnet]}
+ except KeyError:
+            msg = 'Subnet %s not found' % subnet
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def list_security_groups(self, **_params):
+ ret = []
+ for security_group in self._fake_security_groups.values():
+ names = _params.get('name')
+ if names:
+ if not isinstance(names, list):
+ names = [names]
+ for name in names:
+ if security_group.get('name') == name:
+ ret.append(security_group)
+ ids = _params.get('id')
+ if ids:
+ if not isinstance(ids, list):
+ ids = [ids]
+ for id in ids:
+ if security_group.get('id') == id:
+ ret.append(security_group)
+ elif not (names or ids):
+ ret.append(security_group)
+ return {'security_groups': ret}
+
+ def list_networks(self, **_params):
+        # neutronv2/api.py _get_available_networks calls this assuming the
+        # search_opts filter "shared" is implemented and not ignored.
+ shared = _params.get("shared", None)
+ if shared:
+ return {'networks': []}
+ else:
+ return {'networks':
+ [network for network in self._fake_networks.values()]}
+
+ def list_ports(self, **_params):
+ ret = []
+ device_id = _params.get('device_id')
+ for port in self._fake_ports.values():
+ if device_id:
+ if port['device_id'] in device_id:
+ ret.append(port)
+ else:
+ ret.append(port)
+ return {'ports': ret}
+
+ def list_subnets(self, **_params):
+ return {'subnets':
+ [subnet for subnet in self._fake_subnets.values()]}
+
+ def list_floatingips(self, **_params):
+ return {'floatingips': []}
+
+ def delete_security_group(self, security_group):
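+        # Mirror neutron's behaviour: deleting a group that is still
+        # referenced by a port fails with a 409 Conflict.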
+ self.show_security_group(security_group)
+ ports = self.list_ports()
+ for port in ports.get('ports'):
+ for sg_port in port['security_groups']:
+ if sg_port == security_group:
+ msg = ('Unable to delete Security group %s in use'
+ % security_group)
+ raise n_exc.NeutronClientException(message=msg,
+ status_code=409)
+ del self._fake_security_groups[security_group]
+
+ def delete_security_group_rule(self, security_group_rule):
+ self.show_security_group_rule(security_group_rule)
+ del self._fake_security_group_rules[security_group_rule]
+
+ def delete_network(self, network):
+ self.show_network(network)
+ self._check_ports_on_network(network)
+ for subnet in self._fake_subnets.values():
+ if subnet['network_id'] == network:
+ del self._fake_subnets[subnet['id']]
+ del self._fake_networks[network]
+
+ def delete_subnet(self, subnet):
+ subnet = self.show_subnet(subnet).get('subnet')
+ self._check_ports_on_network(subnet['network_id'])
+        del self._fake_subnets[subnet['id']]
+
+ def delete_port(self, port):
+ self.show_port(port)
+ del self._fake_ports[port]
+
+ def update_port(self, port, body=None):
+ self.show_port(port)
+ self._fake_ports[port].update(body['port'])
+ return {'port': self._fake_ports[port]}
+
+ def list_extensions(self, **_parms):
+ return {'extensions': []}
+
+ def _check_ports_on_network(self, network):
+ ports = self.list_ports()
+        for port in ports.get('ports'):
+ if port['network_id'] == network:
+ msg = ('Unable to complete operation on network %s. There is '
+ 'one or more ports still in use on the network'
+ % network)
+ raise n_exc.NeutronClientException(message=msg, status_code=409)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
new file mode 100644
index 0000000000..228b44f369
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
@@ -0,0 +1,222 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import quota_classes
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def quota_set(class_name):
+ return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'instances': 10,
+ 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20, 'key_pairs': 100,
+ 'injected_file_path_bytes': 255}}
+
+
+class QuotaClassSetsTest(test.TestCase):
+
+ def setUp(self):
+ super(QuotaClassSetsTest, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = quota_classes.QuotaClassSetsController(self.ext_mgr)
+
+ def test_format_quota_set(self):
+ raw_quota_set = {
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'floating_ips': 10,
+ 'fixed_ips': -1,
+ 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_path_bytes': 255,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100,
+ }
+
+ quota_set = self.controller._format_quota_set('test_class',
+ raw_quota_set)
+ qs = quota_set['quota_class_set']
+
+ self.assertEqual(qs['id'], 'test_class')
+ self.assertEqual(qs['instances'], 10)
+ self.assertEqual(qs['cores'], 20)
+ self.assertEqual(qs['ram'], 51200)
+ self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['fixed_ips'], -1)
+ self.assertEqual(qs['metadata_items'], 128)
+ self.assertEqual(qs['injected_files'], 5)
+ self.assertEqual(qs['injected_file_path_bytes'], 255)
+ self.assertEqual(qs['injected_file_content_bytes'], 10240)
+ self.assertEqual(qs['security_groups'], 10)
+ self.assertEqual(qs['security_group_rules'], 20)
+ self.assertEqual(qs['key_pairs'], 100)
+
+ def test_quotas_show_as_admin(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, 'test_class')
+
+ self.assertEqual(res_dict, quota_set('test_class'))
+
+ def test_quotas_show_as_unauthorized_user(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
+ req, 'test_class')
+
+ def test_quotas_update_as_admin(self):
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100}}
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'test_class', body)
+
+ self.assertEqual(res_dict, body)
+
+ def test_quotas_update_as_user(self):
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100,
+ }}
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
+ req, 'test_class', body)
+
+ def test_quotas_update_with_empty_body(self):
+ body = {}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'test_class', body)
+
+ def test_quotas_update_with_non_integer(self):
+ body = {'quota_class_set': {'instances': "abc"}}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'test_class', body)
+
+ body = {'quota_class_set': {'instances': 50.5}}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'test_class', body)
+
+ body = {'quota_class_set': {
+ 'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake4/os-quota-class-sets/test_class',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'test_class', body)
+
+
+class QuotaTemplateXMLSerializerTest(test.TestCase):
+ def setUp(self):
+ super(QuotaTemplateXMLSerializerTest, self).setUp()
+ self.serializer = quota_classes.QuotaClassTemplate()
+ self.deserializer = wsgi.XMLDeserializer()
+
+ def test_serializer(self):
+ exemplar = dict(quota_class_set=dict(
+ id='test_class',
+ metadata_items=10,
+ injected_file_path_bytes=255,
+ injected_file_content_bytes=20,
+ ram=50,
+ floating_ips=60,
+ fixed_ips=-1,
+ instances=70,
+ injected_files=80,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ cores=90))
+ text = self.serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('quota_class_set', tree.tag)
+ self.assertEqual('test_class', tree.get('id'))
+ self.assertEqual(len(exemplar['quota_class_set']) - 1, len(tree))
+ for child in tree:
+ self.assertIn(child.tag, exemplar['quota_class_set'])
+ self.assertEqual(int(child.text),
+ exemplar['quota_class_set'][child.tag])
+
+ def test_deserializer(self):
+ exemplar = dict(quota_class_set=dict(
+ metadata_items='10',
+ injected_file_content_bytes='20',
+ ram='50',
+ floating_ips='60',
+ fixed_ips='-1',
+ instances='70',
+ injected_files='80',
+ security_groups='10',
+ security_group_rules='20',
+ key_pairs='100',
+ cores='90'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<quota_class_set>'
+ '<metadata_items>10</metadata_items>'
+ '<injected_file_content_bytes>20'
+ '</injected_file_content_bytes>'
+ '<ram>50</ram>'
+ '<floating_ips>60</floating_ips>'
+ '<fixed_ips>-1</fixed_ips>'
+ '<instances>70</instances>'
+ '<injected_files>80</injected_files>'
+ '<cores>90</cores>'
+ '<security_groups>10</security_groups>'
+ '<security_group_rules>20</security_group_rules>'
+ '<key_pairs>100</key_pairs>'
+ '</quota_class_set>')
+
+ result = self.deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py b/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py
new file mode 100644
index 0000000000..33511b0cc3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py
@@ -0,0 +1,648 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import quotas as quotas_v2
+from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas_v21
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import context as context_maker
+from nova import exception
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def quota_set(id, include_server_group_quotas=True):
+ res = {'quota_set': {'id': id, 'metadata_items': 128,
+ 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1,
+ 'instances': 10, 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10, 'security_group_rules': 20,
+ 'key_pairs': 100, 'injected_file_path_bytes': 255}}
+ if include_server_group_quotas:
+ res['quota_set']['server_groups'] = 10
+ res['quota_set']['server_group_members'] = 10
+ return res
+
+
+class BaseQuotaSetsTest(test.TestCase):
+
+ def _is_v20_api_test(self):
+ # NOTE(oomichi): If a test is for v2.0 API, this method returns
+        # True. Otherwise (v2.1 API test), it returns False.
+ return (self.plugin == quotas_v2)
+
+ def get_update_expected_response(self, base_body):
+ # NOTE(oomichi): "id" parameter is added to a response of
+ # "update quota" API since v2.1 API, because it makes the
+ # API consistent and it is not backwards incompatible change.
+ # This method adds "id" for an expected body of a response.
+ if self._is_v20_api_test():
+ expected_body = base_body
+ else:
+ expected_body = copy.deepcopy(base_body)
+ expected_body['quota_set'].update({'id': 'update_me'})
+ return expected_body
+
+ def setup_mock_for_show(self):
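+        # Only the v2.0 controller consults the extension manager, so the
+        # mox expectation is recorded for that case alone.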
+ if self._is_v20_api_test():
+ self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
+ self.mox.ReplayAll()
+
+ def setup_mock_for_update(self):
+ if self._is_v20_api_test():
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
+ self.mox.ReplayAll()
+
+ def get_delete_status_int(self, res):
+ if self._is_v20_api_test():
+ return res.status_int
+ else:
+            # NOTE: on v2.1, the HTTP status code is exposed as the
+            # wsgi_code of the API method instead of as status_int on a
+            # response object.
+ return self.controller.delete.wsgi_code
+
+
+class QuotaSetsTestV21(BaseQuotaSetsTest):
+ plugin = quotas_v21
+ validation_error = exception.ValidationError
+ include_server_group_quotas = True
+
+ def setUp(self):
+ super(QuotaSetsTestV21, self).setUp()
+ self._setup_controller()
+ self.default_quotas = {
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'floating_ips': 10,
+ 'fixed_ips': -1,
+ 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_path_bytes': 255,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100,
+ }
+ if self.include_server_group_quotas:
+ self.default_quotas['server_groups'] = 10
+ self.default_quotas['server_group_members'] = 10
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+
+ def test_format_quota_set(self):
+ quota_set = self.controller._format_quota_set('1234',
+ self.default_quotas)
+ qs = quota_set['quota_set']
+
+ self.assertEqual(qs['id'], '1234')
+ self.assertEqual(qs['instances'], 10)
+ self.assertEqual(qs['cores'], 20)
+ self.assertEqual(qs['ram'], 51200)
+ self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['fixed_ips'], -1)
+ self.assertEqual(qs['metadata_items'], 128)
+ self.assertEqual(qs['injected_files'], 5)
+ self.assertEqual(qs['injected_file_path_bytes'], 255)
+ self.assertEqual(qs['injected_file_content_bytes'], 10240)
+ self.assertEqual(qs['security_groups'], 10)
+ self.assertEqual(qs['security_group_rules'], 20)
+ self.assertEqual(qs['key_pairs'], 100)
+ if self.include_server_group_quotas:
+ self.assertEqual(qs['server_groups'], 10)
+ self.assertEqual(qs['server_group_members'], 10)
+
+ def test_quotas_defaults(self):
+ uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
+
+ req = fakes.HTTPRequest.blank(uri)
+ res_dict = self.controller.defaults(req, 'fake_tenant')
+ self.default_quotas.update({'id': 'fake_tenant'})
+ expected = {'quota_set': self.default_quotas}
+
+ self.assertEqual(res_dict, expected)
+
+ def test_quotas_show_as_admin(self):
+ self.setup_mock_for_show()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, 1234)
+
+ ref_quota_set = quota_set('1234', self.include_server_group_quotas)
+ self.assertEqual(res_dict, ref_quota_set)
+
+ def test_quotas_show_as_unauthorized_user(self):
+ self.setup_mock_for_show()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
+ req, 1234)
+
+ def test_quotas_update_as_admin(self):
+ self.setup_mock_for_update()
+ self.default_quotas.update({
+ 'instances': 50,
+ 'cores': 50
+ })
+ body = {'quota_set': self.default_quotas}
+ expected_body = self.get_update_expected_response(body)
+
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body=body)
+ self.assertEqual(expected_body, res_dict)
+
+ def test_quotas_update_zero_value_as_admin(self):
+ self.setup_mock_for_update()
+ body = {'quota_set': {'instances': 0, 'cores': 0,
+ 'ram': 0, 'floating_ips': 0,
+ 'metadata_items': 0,
+ 'injected_files': 0,
+ 'injected_file_content_bytes': 0,
+ 'injected_file_path_bytes': 0,
+ 'security_groups': 0,
+ 'security_group_rules': 0,
+ 'key_pairs': 100, 'fixed_ips': -1}}
+ if self.include_server_group_quotas:
+ body['quota_set']['server_groups'] = 10
+ body['quota_set']['server_group_members'] = 10
+ expected_body = self.get_update_expected_response(body)
+
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body=body)
+ self.assertEqual(expected_body, res_dict)
+
+ def test_quotas_update_as_user(self):
+ self.setup_mock_for_update()
+ self.default_quotas.update({
+ 'instances': 50,
+ 'cores': 50
+ })
+ body = {'quota_set': self.default_quotas}
+
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
+ req, 'update_me', body=body)
+
+ def _quotas_update_bad_request_case(self, body):
+ self.setup_mock_for_update()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.assertRaises(self.validation_error, self.controller.update,
+ req, 'update_me', body=body)
+
+ def test_quotas_update_invalid_key(self):
+ body = {'quota_set': {'instances2': -2, 'cores': -2,
+ 'ram': -2, 'floating_ips': -2,
+ 'metadata_items': -2, 'injected_files': -2,
+ 'injected_file_content_bytes': -2}}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_update_invalid_limit(self):
+ body = {'quota_set': {'instances': -2, 'cores': -2,
+ 'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
+ 'metadata_items': -2, 'injected_files': -2,
+ 'injected_file_content_bytes': -2}}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_update_empty_body(self):
+ body = {}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_update_invalid_value_non_int(self):
+        # when a non-integer value is PUT
+ self.default_quotas.update({
+ 'instances': 'test'
+ })
+ body = {'quota_set': self.default_quotas}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_update_invalid_value_with_float(self):
+        # when a non-integer value is PUT
+ self.default_quotas.update({
+ 'instances': 50.5
+ })
+ body = {'quota_set': self.default_quotas}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_update_invalid_value_with_unicode(self):
+        # when a non-integer value is PUT
+ self.default_quotas.update({
+ 'instances': u'\u30aa\u30fc\u30d7\u30f3'
+ })
+ body = {'quota_set': self.default_quotas}
+ self._quotas_update_bad_request_case(body)
+
+ def test_quotas_delete_as_unauthorized_user(self):
+ if self._is_v20_api_test():
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ self.mox.ReplayAll()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
+ req, 1234)
+
+ def test_quotas_delete_as_admin(self):
+ if self._is_v20_api_test():
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ context = context_maker.get_admin_context()
+ self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
+ self.req.environ['nova.context'] = context
+ self.mox.StubOutWithMock(quota.QUOTAS,
+ "destroy_all_by_project")
+ quota.QUOTAS.destroy_all_by_project(context, 1234)
+ self.mox.ReplayAll()
+ res = self.controller.delete(self.req, 1234)
+ self.mox.VerifyAll()
+ self.assertEqual(202, self.get_delete_status_int(res))
+
+
+class QuotaXMLSerializerTest(test.TestCase):
+ def setUp(self):
+ super(QuotaXMLSerializerTest, self).setUp()
+ self.serializer = quotas_v2.QuotaTemplate()
+ self.deserializer = wsgi.XMLDeserializer()
+
+ def test_serializer(self):
+ exemplar = dict(quota_set=dict(
+ id='project_id',
+ metadata_items=10,
+ injected_file_path_bytes=255,
+ injected_file_content_bytes=20,
+ ram=50,
+ floating_ips=60,
+ fixed_ips=-1,
+ instances=70,
+ injected_files=80,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ cores=90))
+ text = self.serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('quota_set', tree.tag)
+ self.assertEqual('project_id', tree.get('id'))
+ self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
+ for child in tree:
+ self.assertIn(child.tag, exemplar['quota_set'])
+ self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])
+
+ def test_deserializer(self):
+ exemplar = dict(quota_set=dict(
+ metadata_items='10',
+ injected_file_content_bytes='20',
+ ram='50',
+ floating_ips='60',
+ fixed_ips='-1',
+ instances='70',
+ injected_files='80',
+ security_groups='10',
+ security_group_rules='20',
+ key_pairs='100',
+ cores='90'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<quota_set>'
+ '<metadata_items>10</metadata_items>'
+ '<injected_file_content_bytes>20'
+ '</injected_file_content_bytes>'
+ '<ram>50</ram>'
+ '<floating_ips>60</floating_ips>'
+ '<fixed_ips>-1</fixed_ips>'
+ '<instances>70</instances>'
+ '<injected_files>80</injected_files>'
+ '<security_groups>10</security_groups>'
+ '<security_group_rules>20</security_group_rules>'
+ '<key_pairs>100</key_pairs>'
+ '<cores>90</cores>'
+ '</quota_set>')
+
+ result = self.deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
+
+
+class ExtendedQuotasTestV21(BaseQuotaSetsTest):
+ plugin = quotas_v21
+
+ def setUp(self):
+ super(ExtendedQuotasTestV21, self).setUp()
+ self._setup_controller()
+ self.setup_mock_for_update()
+
+ fake_quotas = {'ram': {'limit': 51200,
+ 'in_use': 12800,
+ 'reserved': 12800},
+ 'cores': {'limit': 20,
+ 'in_use': 10,
+ 'reserved': 5},
+ 'instances': {'limit': 100,
+ 'in_use': 0,
+ 'reserved': 0}}
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+
+ def fake_get_quotas(self, context, id, user_id=None, usages=False):
+ if usages:
+ return self.fake_quotas
+ else:
+ return dict((k, v['limit']) for k, v in self.fake_quotas.items())
+
+ def fake_get_settable_quotas(self, context, project_id, user_id=None):
+ return {
+ 'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
+ self.fake_quotas['ram']['reserved'],
+ 'maximum': -1},
+ 'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
+ self.fake_quotas['cores']['reserved'],
+ 'maximum': -1},
+ 'instances': {'minimum': self.fake_quotas['instances']['in_use'] +
+ self.fake_quotas['instances']['reserved'],
+ 'maximum': -1},
+ }
+
+ def test_quotas_update_exceed_in_used(self):
+ patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
+ get_settable_quotas = patcher.start()
+
+ body = {'quota_set': {'cores': 10}}
+
+ get_settable_quotas.side_effect = self.fake_get_settable_quotas
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'update_me', body=body)
+ mock.patch.stopall()
+
+ def test_quotas_force_update_exceed_in_used(self):
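+        # Unlike the previous test, passing 'force': 'True' lets the update
+        # proceed even though the new limit is below the current usage.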
+ patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
+ get_settable_quotas = patcher.start()
+ patcher = mock.patch.object(self.plugin.QuotaSetsController,
+ '_get_quotas')
+ _get_quotas = patcher.start()
+
+ body = {'quota_set': {'cores': 10, 'force': 'True'}}
+
+ get_settable_quotas.side_effect = self.fake_get_settable_quotas
+ _get_quotas.side_effect = self.fake_get_quotas
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.controller.update(req, 'update_me', body=body)
+ mock.patch.stopall()
+
+
+class UserQuotasTestV21(BaseQuotaSetsTest):
+ plugin = quotas_v21
+ include_server_group_quotas = True
+
+ def setUp(self):
+ super(UserQuotasTestV21, self).setUp()
+ self._setup_controller()
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+
+ def test_user_quotas_show_as_admin(self):
+ self.setup_mock_for_show()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, 1234)
+ ref_quota_set = quota_set('1234', self.include_server_group_quotas)
+ self.assertEqual(res_dict, ref_quota_set)
+
+ def test_user_quotas_show_as_unauthorized_user(self):
+ self.setup_mock_for_show()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
+ req, 1234)
+
+ def test_user_quotas_update_as_admin(self):
+ self.setup_mock_for_update()
+ body = {'quota_set': {'instances': 10, 'cores': 20,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100}}
+ if self.include_server_group_quotas:
+ body['quota_set']['server_groups'] = 10
+ body['quota_set']['server_group_members'] = 10
+
+ expected_body = self.get_update_expected_response(body)
+
+ url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
+ req = fakes.HTTPRequest.blank(url, use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body=body)
+
+ self.assertEqual(expected_body, res_dict)
+
+ def test_user_quotas_update_as_user(self):
+ self.setup_mock_for_update()
+ body = {'quota_set': {'instances': 10, 'cores': 20,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100,
+ 'server_groups': 10,
+ 'server_group_members': 10}}
+
+ url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
+ req = fakes.HTTPRequest.blank(url)
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
+ req, 'update_me', body=body)
+
+ def test_user_quotas_update_exceed_project(self):
+ self.setup_mock_for_update()
+ body = {'quota_set': {'instances': 20}}
+
+ url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
+ req = fakes.HTTPRequest.blank(url, use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'update_me', body=body)
+
+ def test_user_quotas_delete_as_unauthorized_user(self):
+ self.setup_mock_for_update()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
+ req, 1234)
+
+ def test_user_quotas_delete_as_admin(self):
+ if self._is_v20_api_test():
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
+ context = context_maker.get_admin_context()
+ url = '/v2/fake4/os-quota-sets/1234?user_id=1'
+ self.req = fakes.HTTPRequest.blank(url)
+ self.req.environ['nova.context'] = context
+ self.mox.StubOutWithMock(quota.QUOTAS,
+ "destroy_all_by_project_and_user")
+ quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
+ self.mox.ReplayAll()
+ res = self.controller.delete(self.req, 1234)
+ self.mox.VerifyAll()
+ self.assertEqual(202, self.get_delete_status_int(res))
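+
+ # An equivalent stub using mock instead of mox (an illustrative
+ # sketch only, not part of the original change):
+ #
+ # with mock.patch.object(quota.QUOTAS,
+ # 'destroy_all_by_project_and_user') as destroy:
+ # res = self.controller.delete(self.req, 1234)
+ # destroy.assert_called_once_with(context, 1234, '1')
+ # self.assertEqual(202, self.get_delete_status_int(res))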
+
+
+class QuotaSetsTestV2(QuotaSetsTestV21):
+ plugin = quotas_v2
+ validation_error = webob.exc.HTTPBadRequest
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
+ AndReturn(self.include_server_group_quotas)
+ self.mox.ReplayAll()
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+ self.mox.ResetAll()
+
+ # NOTE: The following tests are tricky because the v2.1 API rejects
+ # this kind of input through strong input validation. We keep them
+ # for now just for v2.0 test coverage.
+ def test_quotas_update_invalid_value_json_format_empty_string(self):
+ self.setup_mock_for_update()
+ self.default_quotas.update({
+ 'instances': 50,
+ 'cores': 50
+ })
+ expected_resp = {'quota_set': self.default_quotas}
+
+ # PUT a JSON-format body with an empty string for a quota value
+ body = copy.deepcopy(expected_resp)
+ body['quota_set']['ram'] = ''
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body)
+ self.assertEqual(res_dict, expected_resp)
+
+ def test_quotas_update_invalid_value_xml_format_empty_string(self):
+ self.default_quotas.update({
+ 'instances': 50,
+ 'cores': 50
+ })
+ expected_resp = {'quota_set': self.default_quotas}
+
+ # PUT an XML-format body with an empty value for a quota
+ body = copy.deepcopy(expected_resp)
+ body['quota_set']['ram'] = {}
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.setup_mock_for_update()
+ res_dict = self.controller.update(req, 'update_me', body)
+ self.assertEqual(res_dict, expected_resp)
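+
+ # For contrast (an illustrative sketch, not part of the original
+ # change): the v2.1 controller validates the body against a JSON
+ # schema, so the same request would be rejected before reaching the
+ # quota driver, roughly:
+ #
+ # body = {'quota_set': {'ram': ''}}
+ # self.assertRaises(self.validation_error, self.controller.update,
+ # req, 'update_me', body=body)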
+
+ # NOTE: os-extended-quotas and os-user-quotas exist only in v2.0.
+ # On v2.1 these features are always enabled, so the following tests
+ # are needed only for v2.0.
+ def test_delete_quotas_when_extension_not_loaded(self):
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
+ self.mox.ReplayAll()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1234)
+
+ def test_delete_user_quotas_when_extension_not_loaded(self):
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
+ self.mox.ReplayAll()
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1234)
+
+
+class QuotaSetsTestV2WithoutServerGroupQuotas(QuotaSetsTestV2):
+ include_server_group_quotas = False
+
+ # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
+ # is always enabled, so this test is only needed for v2.0
+ def test_quotas_update_without_server_group_quotas_extension(self):
+ self.setup_mock_for_update()
+ self.default_quotas.update({
+ 'server_groups': 50,
+ 'server_group_members': 50
+ })
+ body = {'quota_set': self.default_quotas}
+
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'update_me', body=body)
+
+
+class ExtendedQuotasTestV2(ExtendedQuotasTestV21):
+ plugin = quotas_v2
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
+ AndReturn(False)
+ self.mox.ReplayAll()
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+ self.mox.ResetAll()
+
+
+class UserQuotasTestV2(UserQuotasTestV21):
+ plugin = quotas_v2
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
+ AndReturn(self.include_server_group_quotas)
+ self.mox.ReplayAll()
+ self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
+ self.mox.ResetAll()
+
+
+class UserQuotasTestV2WithoutServerGroupQuotas(UserQuotasTestV2):
+ include_server_group_quotas = False
+
+ # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
+ # is always enabled, so this test is only needed for v2.0
+ def test_user_quotas_update_as_admin_without_sg_quota_extension(self):
+ self.setup_mock_for_update()
+ body = {'quota_set': {'instances': 10, 'cores': 20,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100,
+ 'server_groups': 100,
+ 'server_group_members': 200}}
+
+ url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
+ req = fakes.HTTPRequest.blank(url, use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 'update_me', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py b/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py
new file mode 100644
index 0000000000..f8de7de291
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py
@@ -0,0 +1,270 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova import compute
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def rescue(self, context, instance, rescue_password=None,
+ rescue_image_ref=None):
+ pass
+
+
+def unrescue(self, context, instance):
+ pass
+
+
+def fake_compute_get(*args, **kwargs):
+ uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
+ return {'id': 1, 'uuid': uuid}
+
+
+class RescueTestV21(test.NoDBTestCase):
+ _prefix = '/v2/fake'
+
+ def setUp(self):
+ super(RescueTestV21, self).setUp()
+
+ self.stubs.Set(compute.api.API, "get", fake_compute_get)
+ self.stubs.Set(compute.api.API, "rescue", rescue)
+ self.stubs.Set(compute.api.API, "unrescue", unrescue)
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers', 'os-rescue'))
+
+ def test_rescue_from_locked_server(self):
+ def fake_rescue_from_locked_server(self, context,
+ instance, rescue_password=None, rescue_image_ref=None):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+ self.stubs.Set(compute.api.API,
+ 'rescue',
+ fake_rescue_from_locked_server)
+ body = {"rescue": {"adminPass": "AABBCC112233"}}
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_rescue_with_preset_password(self):
+ body = {"rescue": {"adminPass": "AABBCC112233"}}
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("AABBCC112233", resp_json['adminPass'])
+
+ def test_rescue_generates_password(self):
+ body = dict(rescue=None)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
+
+ def test_rescue_of_rescued_instance(self):
+ body = dict(rescue=None)
+
+ def fake_rescue(*args, **kwargs):
+ raise exception.InstanceInvalidState('fake message')
+
+ self.stubs.Set(compute.api.API, "rescue", fake_rescue)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_unrescue(self):
+ body = dict(unrescue=None)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_unrescue_from_locked_server(self):
+ def fake_unrescue_from_locked_server(self, context,
+ instance):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+ self.stubs.Set(compute.api.API,
+ 'unrescue',
+ fake_unrescue_from_locked_server)
+
+ body = dict(unrescue=None)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_unrescue_of_active_instance(self):
+ body = dict(unrescue=None)
+
+ def fake_unrescue(*args, **kwargs):
+ raise exception.InstanceInvalidState('fake message')
+
+ self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 409)
+
+ def test_rescue_raises_unrescuable(self):
+ body = dict(rescue=None)
+
+ def fake_rescue(*args, **kwargs):
+ raise exception.InstanceNotRescuable('fake message')
+
+ self.stubs.Set(compute.api.API, "rescue", fake_rescue)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+
+ @mock.patch('nova.compute.api.API.rescue')
+ def test_rescue_with_image_specified(self, mock_compute_api_rescue):
+ instance = fake_compute_get()
+ body = {"rescue": {"adminPass": "ABC123",
+ "rescue_image_ref": "img-id"}}
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("ABC123", resp_json['adminPass'])
+
+ mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
+ rescue_password=u'ABC123',
+ rescue_image_ref=u'img-id')
+
+ @mock.patch('nova.compute.api.API.rescue')
+ def test_rescue_without_image_specified(self, mock_compute_api_rescue):
+ instance = fake_compute_get()
+ body = {"rescue": {"adminPass": "ABC123"}}
+
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("ABC123", resp_json['adminPass'])
+
+ mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
+ rescue_password=u'ABC123',
+ rescue_image_ref=None)
+
+ def test_rescue_with_none(self):
+ body = dict(rescue=None)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(200, resp.status_int)
+
+ def test_rescue_with_empty_dict(self):
+ body = dict(rescue=dict())
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(200, resp.status_int)
+
+ def test_rescue_disable_password(self):
+ self.flags(enable_instance_password=False)
+ body = dict(rescue=None)
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(200, resp.status_int)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertNotIn('adminPass', resp_json)
+
+ def test_rescue_with_invalid_property(self):
+ body = {"rescue": {"test": "test"}}
+ req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+
+class RescueTestV20(RescueTestV21):
+
+ def _get_app(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=None)
+ return fakes.wsgi_app(init_only=('servers',))
+
+ def test_rescue_with_invalid_property(self):
+ # NOTE(cyeoh): The input validation in the original v2.0 code does
+ # not check for invalid properties.
+ pass
+
+ def test_rescue_disable_password(self):
+ # NOTE(cyeoh): The original v2.0 code does not support disabling,
+ # via a conf setting, the return of the admin password.
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py
new file mode 100644
index 0000000000..fba3a02eec
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py
@@ -0,0 +1,220 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+import nova.compute.api
+from nova.compute import flavors
+from nova import db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+UUID = fakes.FAKE_UUID
+
+
+CONF = cfg.CONF
+
+
+class SchedulerHintsTestCaseV21(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerHintsTestCaseV21, self).setUp()
+ self.fake_instance = fakes.stub_instance(1, uuid=UUID)
+ self._set_up_router()
+
+ def _set_up_router(self):
+ self.app = compute.APIRouterV3(init_only=('servers',
+ 'os-scheduler-hints'))
+
+ def _get_request(self):
+ return fakes.HTTPRequestV3.blank('/servers')
+
+ def test_create_server_without_hints(self):
+
+ def fake_create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {})
+ return ([self.fake_instance], '')
+
+ self.stubs.Set(nova.compute.api.API, 'create', fake_create)
+
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(202, res.status_int)
+
+ def test_create_server_with_hints(self):
+
+ def fake_create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
+ return ([self.fake_instance], '')
+
+ self.stubs.Set(nova.compute.api.API, 'create', fake_create)
+
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ },
+ 'os:scheduler_hints': {'a': 'b'},
+ }
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(202, res.status_int)
+
+ def test_create_server_bad_hints(self):
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ },
+ 'os:scheduler_hints': 'here',
+ }
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+
+class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21):
+
+ def _set_up_router(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Scheduler_hints'])
+ self.app = compute.APIRouter(init_only=('servers',))
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank('/fake/servers')
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': fakes.FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
+ 'osapi_v3')
+ self.no_scheduler_hints_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verify_availability_zone(self, **kwargs):
+ # NOTE: despite its name, this hook verifies how scheduler hints
+ # are passed to compute.api.API.create when the extension is
+ # disabled.
+ self.assertNotIn('scheduler_hints', kwargs)
+
+ def _get_request(self):
+ return fakes.HTTPRequestV3.blank('/servers')
+
+ def _test_create_extra(self, params):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ body = dict(server=server)
+ body.update(params)
+ req = self._get_request()
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ server = self.no_scheduler_hints_controller.create(
+ req, body=body).obj['server']
+
+ def test_create_instance_with_scheduler_hints_disabled(self):
+ hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
+ params = {'OS-SCH-HNT:scheduler_hints': hints}
+ old_create = nova.compute.api.API.create
+
+ def create(*args, **kwargs):
+ self._verify_availability_zone(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(nova.compute.api.API, 'create', create)
+ self._test_create_extra(params)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.no_scheduler_hints_controller = servers_v2.Controller(
+ self.ext_mgr)
+
+ def _verify_availability_zone(self, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {})
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank('/fake/servers')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py
new file mode 100644
index 0000000000..a735f4722e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py
@@ -0,0 +1,515 @@
+# Copyright 2013 Metacloud, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import \
+ security_group_default_rules as security_group_default_rules_v2
+from nova.api.openstack.compute.plugins.v3 import \
+ security_group_default_rules as security_group_default_rules_v21
+from nova.api.openstack import wsgi
+from nova import context
+import nova.db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+CONF = cfg.CONF
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def security_group_default_rule_template(**kwargs):
+ rule = kwargs.copy()
+ rule.setdefault('ip_protocol', 'TCP')
+ rule.setdefault('from_port', 22)
+ rule.setdefault('to_port', 22)
+ rule.setdefault('cidr', '10.10.10.0/24')
+ return rule
+
+
+def security_group_default_rule_db(security_group_default_rule, id=None):
+ attrs = security_group_default_rule.copy()
+ if id is not None:
+ attrs['id'] = id
+ return AttrDict(attrs)
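+
+# Illustrative usage of the two helpers above (a sketch only, not part of
+# the original change): the template fills in TCP/22/22 on 10.10.10.0/24
+# and lets a test override any field, while the db helper wraps the result
+# so attributes can be read either way:
+#
+# rule = security_group_default_rule_template(ip_protocol='UDP')
+# db_rule = security_group_default_rule_db(rule, id=1)
+# assert db_rule.ip_protocol == db_rule['ip_protocol'] == 'UDP'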
+
+
+class TestSecurityGroupDefaultRulesNeutronV21(test.TestCase):
+ controller_cls = (security_group_default_rules_v21.
+ SecurityGroupDefaultRulesController)
+
+ def setUp(self):
+ self.flags(security_group_api='neutron')
+ super(TestSecurityGroupDefaultRulesNeutronV21, self).setUp()
+ self.controller = self.controller_cls()
+
+ def test_create_security_group_default_rule_not_implemented_neutron(self):
+ sgr = security_group_default_rule_template()
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_security_group_default_rules_list_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
+ req)
+
+ def test_security_group_default_rules_show_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
+ req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+ def test_security_group_default_rules_delete_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
+ req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+
+class TestSecurityGroupDefaultRulesNeutronV2(
+ TestSecurityGroupDefaultRulesNeutronV21):
+ controller_cls = (security_group_default_rules_v2.
+ SecurityGroupDefaultRulesController)
+
+
+class TestSecurityGroupDefaultRulesV21(test.TestCase):
+ controller_cls = (security_group_default_rules_v21.
+ SecurityGroupDefaultRulesController)
+
+ def setUp(self):
+ super(TestSecurityGroupDefaultRulesV21, self).setUp()
+ self.controller = self.controller_cls()
+
+ def test_create_security_group_default_rule(self):
+ sgr = security_group_default_rule_template()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ sgr_dict = dict(security_group_default_rule=sgr)
+ res_dict = self.controller.create(req, sgr_dict)
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertEqual(security_group_default_rule['ip_protocol'],
+ sgr['ip_protocol'])
+ self.assertEqual(security_group_default_rule['from_port'],
+ sgr['from_port'])
+ self.assertEqual(security_group_default_rule['to_port'],
+ sgr['to_port'])
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ sgr['cidr'])
+
+ def test_create_security_group_default_rule_with_no_to_port(self):
+ sgr = security_group_default_rule_template()
+ del sgr['to_port']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_from_port(self):
+ sgr = security_group_default_rule_template()
+ del sgr['from_port']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_ip_protocol(self):
+ sgr = security_group_default_rule_template()
+ del sgr['ip_protocol']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_cidr(self):
+ sgr = security_group_default_rule_template()
+ del sgr['cidr']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.create(req,
+ {'security_group_default_rule': sgr})
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertNotEqual(security_group_default_rule['id'], 0)
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ '0.0.0.0/0')
+
+ def test_create_security_group_default_rule_with_blank_to_port(self):
+ sgr = security_group_default_rule_template(to_port='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_from_port(self):
+ sgr = security_group_default_rule_template(from_port='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_ip_protocol(self):
+ sgr = security_group_default_rule_template(ip_protocol='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_cidr(self):
+ sgr = security_group_default_rule_template(cidr='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.create(req,
+ {'security_group_default_rule': sgr})
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertNotEqual(security_group_default_rule['id'], 0)
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ '0.0.0.0/0')
+
+ def test_create_security_group_default_rule_non_numerical_to_port(self):
+ sgr = security_group_default_rule_template(to_port='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_non_numerical_from_port(self):
+ sgr = security_group_default_rule_template(from_port='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_ip_protocol(self):
+ sgr = security_group_default_rule_template(ip_protocol='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_cidr(self):
+ sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_to_port(self):
+ sgr = security_group_default_rule_template(to_port='666666')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_from_port(self):
+ sgr = security_group_default_rule_template(from_port='666666')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_body(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ def test_create_duplicate_security_group_default_rule(self):
+ sgr = security_group_default_rule_template()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.controller.create(req, {'security_group_default_rule': sgr})
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_security_group_default_rules_list(self):
+ self.test_create_security_group_default_rule()
+ rules = [dict(id=1,
+ ip_protocol='TCP',
+ from_port=22,
+ to_port=22,
+ ip_range=dict(cidr='10.10.10.0/24'))]
+ expected = {'security_group_default_rules': rules}
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, expected)
+
+ def test_default_security_group_default_rule_show(self):
+ sgr = security_group_default_rule_template(id=1)
+
+ self.test_create_security_group_default_rule()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.show(req, '1')
+
+ security_group_default_rule = res_dict['security_group_default_rule']
+
+ self.assertEqual(security_group_default_rule['ip_protocol'],
+ sgr['ip_protocol'])
+ self.assertEqual(security_group_default_rule['to_port'],
+ sgr['to_port'])
+ self.assertEqual(security_group_default_rule['from_port'],
+ sgr['from_port'])
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ sgr['cidr'])
+
+ def test_delete_security_group_default_rule(self):
+ sgr = security_group_default_rule_template(id=1)
+
+ self.test_create_security_group_default_rule()
+
+ self.called = False
+
+ def security_group_default_rule_destroy(context, id):
+ self.called = True
+
+ def return_security_group_default_rule(context, id):
+ self.assertEqual(sgr['id'], id)
+ return security_group_default_rule_db(sgr)
+
+ self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
+ security_group_default_rule_destroy)
+ self.stubs.Set(nova.db, 'security_group_default_rule_get',
+ return_security_group_default_rule)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.controller.delete(req, '1')
+
+ self.assertTrue(self.called)
+
+ def test_security_group_ensure_default(self):
+ sgr = security_group_default_rule_template(id=1)
+ self.test_create_security_group_default_rule()
+
+ ctxt = context.get_admin_context()
+
+ setattr(ctxt, 'project_id', 'new_project_id')
+
+ sg = nova.db.security_group_ensure_default(ctxt)
+ rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
+ security_group_rule = rules[0]
+ self.assertEqual(sgr['id'], security_group_rule.id)
+ self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
+ self.assertEqual(sgr['from_port'], security_group_rule.from_port)
+ self.assertEqual(sgr['to_port'], security_group_rule.to_port)
+ self.assertEqual(sgr['cidr'], security_group_rule.cidr)
+
+
+class TestSecurityGroupDefaultRulesV2(TestSecurityGroupDefaultRulesV21):
+ controller_cls = (security_group_default_rules_v2.
+ SecurityGroupDefaultRulesController)
+
+
+class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
+ deserializer = security_group_default_rules_v2.\
+ SecurityGroupDefaultRulesXMLDeserializer()
+ self.deserializer = deserializer
+
+ def test_create_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_to_port_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_from_port_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_ip_protocol_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_cidr_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+
+class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.rule_serializer =\
+ security_group_default_rules_v2.SecurityGroupDefaultRuleTemplate()
+ self.index_serializer =\
+ security_group_default_rules_v2.SecurityGroupDefaultRulesTemplate()
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def _verify_security_group_default_rule(self, raw_rule, tree):
+ self.assertEqual(raw_rule['id'], tree.get('id'))
+
+ seen = set()
+ expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
+ 'ip_range/cidr'])
+
+ for child in tree:
+ child_tag = self._tag(child)
+ seen.add(child_tag)
+ if child_tag == 'ip_range':
+ for gr_child in child:
+ gr_child_tag = self._tag(gr_child)
+ self.assertIn(gr_child_tag, raw_rule[child_tag])
+ seen.add('%s/%s' % (child_tag, gr_child_tag))
+ self.assertEqual(gr_child.text,
+ raw_rule[child_tag][gr_child_tag])
+ else:
+ self.assertEqual(child.text, raw_rule[child_tag])
+ self.assertEqual(seen, expected)
+
+ def test_rule_serializer(self):
+ raw_rule = dict(id='123',
+ ip_protocol='TCP',
+ from_port='22',
+ to_port='22',
+ ip_range=dict(cidr='10.10.10.0/24'))
+ rule = dict(security_group_default_rule=raw_rule)
+ text = self.rule_serializer.serialize(rule)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('security_group_default_rule', self._tag(tree))
+ self._verify_security_group_default_rule(raw_rule, tree)
+
+ def test_index_serializer(self):
+ rules = [dict(id='123',
+ ip_protocol='TCP',
+ from_port='22',
+ to_port='22',
+ ip_range=dict(cidr='10.10.10.0/24')),
+ dict(id='234',
+ ip_protocol='UDP',
+ from_port='23456',
+ to_port='234567',
+ ip_range=dict(cidr='10.12.0.0/18')),
+ dict(id='345',
+ ip_protocol='tcp',
+ from_port='3456',
+ to_port='4567',
+ ip_range=dict(cidr='192.168.1.0/32'))]
+
+ rules_dict = dict(security_group_default_rules=rules)
+
+ text = self.index_serializer.serialize(rules_dict)
+
+ tree = etree.fromstring(text)
+ self.assertEqual('security_group_default_rules', self._tag(tree))
+ self.assertEqual(len(rules), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_security_group_default_rule(rules[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py
new file mode 100644
index 0000000000..d1620b6a28
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py
@@ -0,0 +1,1767 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2012 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
+from nova.api.openstack.compute.plugins.v3 import security_groups as \
+ secgroups_v21
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova.compute import power_state
+from nova import context as context_maker
+import nova.db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
+FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def security_group_template(**kwargs):
+ sg = kwargs.copy()
+ sg.setdefault('tenant_id', '123')
+ sg.setdefault('name', 'test')
+ sg.setdefault('description', 'test-description')
+ return sg
+
+
+def security_group_db(security_group, id=None):
+ attrs = security_group.copy()
+ if 'tenant_id' in attrs:
+ attrs['project_id'] = attrs.pop('tenant_id')
+ if id is not None:
+ attrs['id'] = id
+ attrs.setdefault('rules', [])
+ attrs.setdefault('instances', [])
+ return AttrDict(attrs)
+
+
+def security_group_rule_template(**kwargs):
+ rule = kwargs.copy()
+ rule.setdefault('ip_protocol', 'tcp')
+ rule.setdefault('from_port', 22)
+ rule.setdefault('to_port', 22)
+ rule.setdefault('parent_group_id', 2)
+ return rule
+
+
+def security_group_rule_db(rule, id=None):
+ attrs = rule.copy()
+ if 'ip_protocol' in attrs:
+ attrs['protocol'] = attrs.pop('ip_protocol')
+ return AttrDict(attrs)
+
+
+def return_server(context, server_id,
+ columns_to_join=None, use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'id': int(server_id),
+ 'power_state': 0x01,
+ 'host': "localhost",
+ 'uuid': FAKE_UUID1,
+ 'name': 'asdf'})
+
+
+def return_server_by_uuid(context, server_uuid,
+ columns_to_join=None,
+ use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'id': 1,
+ 'power_state': 0x01,
+ 'host': "localhost",
+ 'uuid': server_uuid,
+ 'name': 'asdf'})
+
+
+def return_non_running_server(context, server_id, columns_to_join=None):
+ return fake_instance.fake_db_instance(
+ **{'id': server_id, 'power_state': power_state.SHUTDOWN,
+ 'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
+
+
+def return_security_group_by_name(context, project_id, group_name):
+ return {'id': 1, 'name': group_name,
+ "instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
+
+
+def return_security_group_without_instances(context, project_id, group_name):
+ return {'id': 1, 'name': group_name}
+
+
+def return_server_nonexistent(context, server_id, columns_to_join=None):
+ raise exception.InstanceNotFound(instance_id=server_id)
+
+
+class TestSecurityGroupsV21(test.TestCase):
+ secgrp_ctl_cls = secgroups_v21.SecurityGroupController
+ server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
+ secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
+
+ def setUp(self):
+ super(TestSecurityGroupsV21, self).setUp()
+
+ self.controller = self.secgrp_ctl_cls()
+ self.server_controller = self.server_secgrp_ctl_cls()
+ self.manager = self.secgrp_act_ctl_cls()
+
+ # fake_id is set here because it depends on the controller's
+ # 'security_group_api'; a derived class that swaps in a different
+ # one must do so before this setUp method runs.
+ if self.controller.security_group_api.id_is_uuid:
+ self.fake_id = '11111111-1111-1111-1111-111111111111'
+ else:
+ self.fake_id = '11111111'
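+ # (Illustrative note, not part of the original change: the neutron
+ # security group driver identifies groups by UUID, while the
+ # nova-network one uses integer ids, hence the two formats above.)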
+
+ def _assert_no_security_groups_reserved(self, context):
+ """Check that no reservations are leaked during tests."""
+ result = quota.QUOTAS.get_project_quotas(context, context.project_id)
+ self.assertEqual(result['security_groups']['reserved'], 0)
+
+ def _assert_security_groups_in_use(self, project_id, user_id, in_use):
+ context = context_maker.get_admin_context()
+ result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
+ self.assertEqual(result['security_groups']['in_use'], in_use)
+
+ def test_create_security_group(self):
+ sg = security_group_template()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ res_dict = self.controller.create(req, {'security_group': sg})
+ self.assertEqual(res_dict['security_group']['name'], 'test')
+ self.assertEqual(res_dict['security_group']['description'],
+ 'test-description')
+
+ def test_create_security_group_with_no_name(self):
+ sg = security_group_template()
+ del sg['name']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, sg)
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_no_description(self):
+ sg = security_group_template()
+ del sg['description']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_empty_description(self):
+ sg = security_group_template()
+ sg['description'] = ""
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ try:
+ self.controller.create(req, {'security_group': sg})
+ self.fail('Should have raised BadRequest exception')
+ except webob.exc.HTTPBadRequest as exc:
+ self.assertEqual('description has a minimum character requirement'
+ ' of 1.', exc.explanation)
+ except exception.InvalidInput:
+ self.fail('Should have raised BadRequest exception instead of'
+ ' InvalidInput')
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_blank_name(self):
+ sg = security_group_template(name='')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_whitespace_name(self):
+ sg = security_group_template(name=' ')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_blank_description(self):
+ sg = security_group_template(description='')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_whitespace_description(self):
+ sg = security_group_template(description=' ')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_duplicate_name(self):
+ sg = security_group_template()
+
+ # FIXME: Stub out _get instead of creating twice
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller.create(req, {'security_group': sg})
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_no_security_group(self):
+ body = {'no-securityGroup': None}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_above_255_characters_name(self):
+ sg = security_group_template(name='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_above_255_characters_description(self):
+ sg = security_group_template(description='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_non_string_name(self):
+ sg = security_group_template(name=12)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_non_string_description(self):
+ sg = security_group_template(description=12)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_quota_limit(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ for num in range(1, CONF.quota_security_groups):
+ name = 'test%s' % num
+ sg = security_group_template(name=name)
+ res_dict = self.controller.create(req, {'security_group': sg})
+ self.assertEqual(res_dict['security_group']['name'], name)
+
+ sg = security_group_template()
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_get_security_group_list(self):
+ groups = []
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ groups.append(sg)
+ expected = {'security_groups': groups}
+
+ def return_security_groups(context, project_id):
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_project',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_list_missing_group_id_rule(self):
+ groups = []
+ rule1 = security_group_rule_template(cidr='10.2.3.124/24',
+ parent_group_id=1,
+ group_id={}, id=88,
+ protocol='TCP')
+ rule2 = security_group_rule_template(cidr='10.2.3.125/24',
+ parent_group_id=1,
+ id=99, protocol=88,
+ group_id='HAS_BEEN_DELETED')
+ sg = security_group_template(id=1,
+ name='test',
+ description='test-desc',
+ rules=[rule1, rule2])
+
+ groups.append(sg)
+ # An expected rule here needs to be created as the api returns
+ # different attributes on the rule for a response than what was
+ # passed in. For example:
+ # "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
+ expected_rule = security_group_rule_template(
+ ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
+ group={}, id=88, ip_protocol='TCP')
+ expected = security_group_template(id=1,
+ name='test',
+ description='test-desc',
+ rules=[expected_rule])
+
+ expected = {'security_groups': [expected]}
+
+ def return_security_groups(context, project, search_opts):
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(self.controller.security_group_api, 'list',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_list_all_tenants(self):
+ all_groups = []
+ tenant_groups = []
+
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ all_groups.append(sg)
+ if name == 'default':
+ tenant_groups.append(sg)
+
+ all = {'security_groups': all_groups}
+ tenant_specific = {'security_groups': tenant_groups}
+
+ def return_all_security_groups(context):
+ return [security_group_db(sg) for sg in all_groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_all',
+ return_all_security_groups)
+
+ def return_tenant_security_groups(context, project_id):
+ return [security_group_db(sg) for sg in tenant_groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_project',
+ return_tenant_security_groups)
+
+ path = '/v2/fake/os-security-groups'
+
+ req = fakes.HTTPRequest.blank(path, use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, tenant_specific)
+
+ req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
+ use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, all)
+
+ def test_get_security_group_by_instance(self):
+ groups = []
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ groups.append(sg)
+ expected = {'security_groups': groups}
+
+ def return_instance(context, server_id,
+ columns_to_join=None, use_slave=False):
+ self.assertEqual(server_id, FAKE_UUID1)
+ return return_server_by_uuid(context, server_id)
+
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_instance)
+
+ def return_security_groups(context, instance_uuid):
+ self.assertEqual(instance_uuid, FAKE_UUID1)
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_instance',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
+ ('fake', FAKE_UUID1))
+ res_dict = self.server_controller.index(req, FAKE_UUID1)
+
+ self.assertEqual(res_dict, expected)
+
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
+ def test_get_security_group_empty_for_instance(self, mock_sec_group,
+ mock_db_get_ins):
+ expected = {'security_groups': []}
+
+ def return_instance(context, server_id,
+ columns_to_join=None, use_slave=False):
+ self.assertEqual(server_id, FAKE_UUID1)
+ return return_server_by_uuid(context, server_id)
+ mock_db_get_ins.side_effect = return_instance
+ req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
+ ('fake', FAKE_UUID1))
+ res_dict = self.server_controller.index(req, FAKE_UUID1)
+ self.assertEqual(expected, res_dict)
+ mock_sec_group.assert_called_once_with(req.environ['nova.context'],
+ FAKE_UUID1)
+
+ def test_get_security_group_by_instance_non_existing(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.server_controller.index, req, '1')
+
+ def test_get_security_group_by_instance_invalid_id(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/servers/invalid/os-security-groups')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.server_controller.index, req, 'invalid')
+
+ def test_get_security_group_by_id(self):
+ sg = security_group_template(id=2, rules=[])
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ res_dict = self.controller.show(req, '2')
+
+ expected = {'security_group': sg}
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_by_invalid_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.show,
+ req, 'invalid')
+
+ def test_get_security_group_by_non_existing_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ self.fake_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.fake_id)
+
+ def test_update_security_group(self):
+ sg = security_group_template(id=2, rules=[])
+ sg_update = security_group_template(id=2, rules=[],
+ name='update_name', description='update_desc')
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ def return_update_security_group(context, group_id, values,
+ columns_to_join=None):
+ self.assertEqual(sg_update['id'], group_id)
+ self.assertEqual(sg_update['name'], values['name'])
+ self.assertEqual(sg_update['description'], values['description'])
+ return security_group_db(sg_update)
+
+ self.stubs.Set(nova.db, 'security_group_update',
+ return_update_security_group)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ res_dict = self.controller.update(req, '2',
+ {'security_group': sg_update})
+
+ expected = {'security_group': sg_update}
+ self.assertEqual(res_dict, expected)
+
+ def test_update_security_group_name_to_default(self):
+ sg = security_group_template(id=2, rules=[], name='default')
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, '2', {'security_group': sg})
+
+ def test_update_default_security_group_fail(self):
+ sg = security_group_template()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, '1', {'security_group': sg})
+
+ def test_delete_security_group_by_id(self):
+ sg = security_group_template(id=1, project_id='fake_project',
+ user_id='fake_user', rules=[])
+
+ self.called = False
+
+ def security_group_destroy(context, id):
+ self.called = True
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_destroy',
+ security_group_destroy)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.controller.delete(req, '1')
+
+ self.assertTrue(self.called)
+
+ def test_delete_security_group_by_admin(self):
+ sg = security_group_template(id=2, rules=[])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller.create(req, {'security_group': sg})
+ context = req.environ['nova.context']
+
+ # Ensure quota usage for security group is correct.
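+ # in_use is 2 here because the project's 'default' group counts as
+ # well; it drops back to 1 after the delete below.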
+ self._assert_security_groups_in_use(context.project_id,
+ context.user_id, 2)
+
+ # Delete the security group by admin.
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
+ use_admin_context=True)
+ self.controller.delete(req, '2')
+
+ # Ensure quota for security group in use is released.
+ self._assert_security_groups_in_use(context.project_id,
+ context.user_id, 1)
+
+ def test_delete_security_group_by_invalid_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
+
+ def test_delete_security_group_by_non_existing_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % self.fake_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.fake_id)
+
+ def test_delete_security_group_in_use(self):
+ sg = security_group_template(id=1, rules=[])
+
+ def security_group_in_use(context, id):
+ return True
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_in_use',
+ security_group_in_use)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, '1')
+
+ def test_associate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEqual(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
+ body = dict(addSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_by_invalid_server_id(self):
+ body = dict(addSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, 'invalid', body)
+
+ def test_associate_without_body(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=None)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_no_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=dict())
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_security_group_name_with_whitespaces(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=dict(name=" "))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_non_existing_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_non_running_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_non_running_server)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_already_associated_security_group_to_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
+ nova.db.instance_add_security_group(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ self.mox.ReplayAll()
+
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_disassociate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEqual(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
+ body = dict(removeSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_by_invalid_server_id(self):
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, 'invalid',
+ body)
+
+ def test_disassociate_without_body(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=None)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_no_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=dict())
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_security_group_name_with_whitespaces(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=dict(name=" "))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_existing_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_running_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_non_running_server)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+ def test_disassociate_already_associated_security_group_to_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
+ nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ self.mox.ReplayAll()
+
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+
+class TestSecurityGroupsV2(TestSecurityGroupsV21):
+ secgrp_ctl_cls = secgroups_v2.SecurityGroupController
+ server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
+ secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
+
+
+class TestSecurityGroupRulesV21(test.TestCase):
+ secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
+
+ def setUp(self):
+ super(TestSecurityGroupRulesV21, self).setUp()
+
+ self.controller = self.secgrp_ctl_cls()
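+ # id_is_uuid distinguishes drivers that expose UUID identifiers for
+ # security groups (e.g. the neutron-backed API) from the DB-backed
+ # API, which uses integer ids; fixture ids are chosen to match.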
+ if self.controller.security_group_api.id_is_uuid:
+ id1 = '11111111-1111-1111-1111-111111111111'
+ id2 = '22222222-2222-2222-2222-222222222222'
+ self.invalid_id = '33333333-3333-3333-3333-333333333333'
+ else:
+ id1 = 1
+ id2 = 2
+ self.invalid_id = '33333333'
+
+ self.sg1 = security_group_template(id=id1)
+ self.sg2 = security_group_template(
+ id=id2, name='authorize_revoke',
+ description='authorize-revoke testing')
+
+ db1 = security_group_db(self.sg1)
+ db2 = security_group_db(self.sg2)
+
+ def return_security_group(context, group_id, columns_to_join=None):
+ if group_id == db1['id']:
+ return db1
+ if group_id == db2['id']:
+ return db2
+ raise exception.SecurityGroupNotFound(security_group_id=group_id)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ self.parent_security_group = db2
+
+ def test_create_by_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.3.124/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "10.2.3.124/24")
+
+ def test_create_by_group_id(self):
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+
+ def test_create_by_same_group_id(self):
+ rule1 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=80, to_port=80,
+ parent_group_id=self.sg2['id'])
+ self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
+
+ rule2 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=81, to_port=81,
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule2})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+ self.assertEqual(security_group_rule['from_port'], 81)
+ self.assertEqual(security_group_rule['to_port'], 81)
+
+ def test_create_none_value_from_to_port(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id']}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertIsNone(security_group_rule['from_port'])
+ self.assertIsNone(security_group_rule['to_port'])
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_none_value_from_to_port_icmp(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id'],
+ 'ip_protocol': 'ICMP'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
+ self.assertEqual(security_group_rule['from_port'], -1)
+ self.assertEqual(security_group_rule['to_port'], -1)
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_none_value_from_to_port_tcp(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id'],
+ 'ip_protocol': 'TCP'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
+ self.assertEqual(security_group_rule['from_port'], 1)
+ self.assertEqual(security_group_rule['to_port'], 65535)
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_by_invalid_cidr_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="tcp",
+ from_port=22,
+ to_port=22,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/2433")
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_by_invalid_tcp_port_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="tcp",
+ from_port=75534,
+ to_port=22,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/24")
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_by_invalid_icmp_port_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="icmp",
+ from_port=1,
+ to_port=256,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/24")
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_add_existing_rules_by_cidr(self):
+ rule = security_group_rule_template(cidr='10.0.0.0/24',
+ parent_group_id=self.sg2['id'])
+
+ self.parent_security_group['rules'] = [security_group_rule_db(rule)]
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_add_existing_rules_by_group_id(self):
+ rule = security_group_rule_template(group_id=1)
+
+ self.parent_security_group['rules'] = [security_group_rule_db(rule)]
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ def test_create_with_no_security_group_rule_in_body(self):
+ rules = {'test': 'test'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, rules)
+
+ def test_create_with_invalid_parent_group_id(self):
+ rule = security_group_rule_template(parent_group_id='invalid')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_existing_parent_group_id(self):
+ rule = security_group_rule_template(group_id=None,
+ parent_group_id=self.invalid_id)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_existing_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_protocol(self):
+ rule = security_group_rule_template(ip_protocol='invalid-protocol',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_protocol(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['ip_protocol']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_from_port(self):
+ rule = security_group_rule_template(from_port='666666',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_to_port(self):
+ rule = security_group_rule_template(to_port='666666',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_from_port(self):
+ rule = security_group_rule_template(from_port='invalid',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_to_port(self):
+ rule = security_group_rule_template(to_port='invalid',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_from_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['from_port']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_to_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['to_port']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.2222.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_cidr_group(self):
+ rule = security_group_rule_template(parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "0.0.0.0/0")
+
+ def test_create_with_invalid_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_empty_group_id(self):
+ rule = security_group_rule_template(group_id='',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_nonexist_group_id(self):
+ rule = security_group_rule_template(group_id=self.invalid_id,
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_same_group_parent_id_and_group_id(self):
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg1['id'])
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+ self.assertEqual(security_group_rule['group']['name'],
+ self.sg1['name'])
+
+ def _test_create_with_no_ports_and_no_group(self, proto):
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def _test_create_with_no_ports(self, proto):
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ expected_rule = {
+ 'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
+ 'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
+ }
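+ # ICMP has no ports, so the API reports the wildcard type/code as
+ # -1/-1; TCP and UDP default to the full 1-65535 range when no ports
+ # are supplied.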
+ if proto == 'icmp':
+ expected_rule['to_port'] = -1
+ expected_rule['from_port'] = -1
+ self.assertEqual(expected_rule, security_group_rule)
+
+ def test_create_with_no_ports_icmp(self):
+ self._test_create_with_no_ports_and_no_group('icmp')
+ self._test_create_with_no_ports('icmp')
+
+ def test_create_with_no_ports_tcp(self):
+ self._test_create_with_no_ports_and_no_group('tcp')
+ self._test_create_with_no_ports('tcp')
+
+ def test_create_with_no_ports_udp(self):
+ self._test_create_with_no_ports_and_no_group('udp')
+ self._test_create_with_no_ports('udp')
+
+ def _test_create_with_ports(self, proto, from_port, to_port):
+ rule = {
+ 'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
+ }
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ expected_rule = {
+ 'from_port': from_port,
+ 'group': {'tenant_id': '123', 'name': 'test'},
+ 'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
+ }
+ self.assertEqual(proto, security_group_rule['ip_protocol'])
+ self.assertEqual(from_port, security_group_rule['from_port'])
+ self.assertEqual(to_port, security_group_rule['to_port'])
+ self.assertEqual(expected_rule, security_group_rule)
+
+ def test_create_with_ports_icmp(self):
+ self._test_create_with_ports('icmp', 0, 1)
+ self._test_create_with_ports('icmp', 0, 0)
+ self._test_create_with_ports('icmp', 1, 0)
+
+ def test_create_with_ports_tcp(self):
+ self._test_create_with_ports('tcp', 1, 1)
+ self._test_create_with_ports('tcp', 1, 65535)
+ self._test_create_with_ports('tcp', 65535, 65535)
+
+ def test_create_with_ports_udp(self):
+ self._test_create_with_ports('udp', 1, 1)
+ self._test_create_with_ports('udp', 1, 65535)
+ self._test_create_with_ports('udp', 65535, 65535)
+
+ def test_delete(self):
+ rule = security_group_rule_template(id=self.sg2['id'],
+ parent_group_id=self.sg2['id'])
+
+ def security_group_rule_get(context, id):
+ return security_group_rule_db(rule)
+
+ def security_group_rule_destroy(context, id):
+ pass
+
+ self.stubs.Set(nova.db, 'security_group_rule_get',
+ security_group_rule_get)
+ self.stubs.Set(nova.db, 'security_group_rule_destroy',
+ security_group_rule_destroy)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.sg2['id'])
+ self.controller.delete(req, self.sg2['id'])
+
+ def test_delete_invalid_rule_id(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-rules/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
+
+ def test_delete_non_existing_rule_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.invalid_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.invalid_id)
+
+ def test_create_rule_quota_limit(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
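+ # Exhaust the per-group rule quota, then verify that the next create
+ # is rejected with HTTP 403 (Forbidden).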
+ for num in range(100, 100 + CONF.quota_security_group_rules):
+ rule = {
+ 'ip_protocol': 'tcp', 'from_port': num,
+ 'to_port': num, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']
+ }
+ self.controller.create(req, {'security_group_rule': rule})
+
+ rule = {
+ 'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
+ }
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_rule_cidr_allow_all(self):
+ rule = security_group_rule_template(cidr='0.0.0.0/0',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "0.0.0.0/0")
+
+ def test_create_rule_cidr_ipv6_allow_all(self):
+ rule = security_group_rule_template(cidr='::/0',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "::/0")
+
+ def test_create_rule_cidr_allow_some(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/8',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "15.0.0.0/8")
+
+ def test_create_rule_cidr_bad_netmask(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/0')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+
+class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
+ secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
+
+
+class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
+ self.deserializer = secgroups_v2.SecurityGroupRulesXMLDeserializer()
+
+ def test_create_request(self):
+ serial_request = """
+<security_group_rule>
+ <parent_group_id>12</parent_group_id>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <group_id></group_id>
+ <ip_protocol>tcp</ip_protocol>
+ <cidr>10.0.0.0/24</cidr>
+</security_group_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_rule": {
+ "parent_group_id": "12",
+ "from_port": "22",
+ "to_port": "22",
+ "ip_protocol": "tcp",
+ "group_id": "",
+ "cidr": "10.0.0.0/24",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_protocol_request(self):
+ serial_request = """
+<security_group_rule>
+ <parent_group_id>12</parent_group_id>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <group_id></group_id>
+ <cidr>10.0.0.0/24</cidr>
+</security_group_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_rule": {
+ "parent_group_id": "12",
+ "from_port": "22",
+ "to_port": "22",
+ "group_id": "",
+ "cidr": "10.0.0.0/24",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class TestSecurityGroupXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestSecurityGroupXMLDeserializer, self).setUp()
+ self.deserializer = secgroups_v2.SecurityGroupXMLDeserializer()
+
+ def test_create_request(self):
+ serial_request = """
+<security_group name="test">
+ <description>test</description>
+</security_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group": {
+ "name": "test",
+ "description": "test",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_description_request(self):
+ serial_request = """
+<security_group name="test">
+</security_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group": {
+ "name": "test",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_name_request(self):
+ serial_request = """
+<security_group>
+<description>test</description>
+</security_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group": {
+ "description": "test",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class TestSecurityGroupXMLSerializer(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupXMLSerializer, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.rule_serializer = secgroups_v2.SecurityGroupRuleTemplate()
+ self.index_serializer = secgroups_v2.SecurityGroupsTemplate()
+ self.default_serializer = secgroups_v2.SecurityGroupTemplate()
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def _verify_security_group_rule(self, raw_rule, tree):
+ self.assertEqual(raw_rule['id'], tree.get('id'))
+ self.assertEqual(raw_rule['parent_group_id'],
+ tree.get('parent_group_id'))
+
+ seen = set()
+ expected = set(['ip_protocol', 'from_port', 'to_port',
+ 'group', 'group/name', 'group/tenant_id',
+ 'ip_range', 'ip_range/cidr'])
+
+ for child in tree:
+ child_tag = self._tag(child)
+ self.assertIn(child_tag, raw_rule)
+ seen.add(child_tag)
+ if child_tag in ('group', 'ip_range'):
+ for gr_child in child:
+ gr_child_tag = self._tag(gr_child)
+ self.assertIn(gr_child_tag, raw_rule[child_tag])
+ seen.add('%s/%s' % (child_tag, gr_child_tag))
+ self.assertEqual(gr_child.text,
+ raw_rule[child_tag][gr_child_tag])
+ else:
+ self.assertEqual(child.text, raw_rule[child_tag])
+ self.assertEqual(seen, expected)
+
+ def _verify_security_group(self, raw_group, tree):
+ rules = raw_group['rules']
+ self.assertEqual('security_group', self._tag(tree))
+ self.assertEqual(raw_group['id'], tree.get('id'))
+ self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
+ self.assertEqual(raw_group['name'], tree.get('name'))
+ self.assertEqual(2, len(tree))
+ for child in tree:
+ child_tag = self._tag(child)
+ if child_tag == 'rules':
+ self.assertEqual(2, len(child))
+ for idx, gr_child in enumerate(child):
+ self.assertEqual(self._tag(gr_child), 'rule')
+ self._verify_security_group_rule(rules[idx], gr_child)
+ else:
+ self.assertEqual('description', child_tag)
+ self.assertEqual(raw_group['description'], child.text)
+
+ def test_rule_serializer(self):
+ raw_rule = dict(
+ id='123',
+ parent_group_id='456',
+ ip_protocol='tcp',
+ from_port='789',
+ to_port='987',
+ group=dict(name='group', tenant_id='tenant'),
+ ip_range=dict(cidr='10.0.0.0/8'))
+ rule = dict(security_group_rule=raw_rule)
+ text = self.rule_serializer.serialize(rule)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('security_group_rule', self._tag(tree))
+ self._verify_security_group_rule(raw_rule, tree)
+
+ def test_group_serializer(self):
+ rules = [dict(
+ id='123',
+ parent_group_id='456',
+ ip_protocol='tcp',
+ from_port='789',
+ to_port='987',
+ group=dict(name='group1', tenant_id='tenant1'),
+ ip_range=dict(cidr='10.55.44.0/24')),
+ dict(
+ id='654',
+ parent_group_id='321',
+ ip_protocol='udp',
+ from_port='234',
+ to_port='567',
+ group=dict(name='group2', tenant_id='tenant2'),
+ ip_range=dict(cidr='10.44.55.0/24'))]
+ raw_group = dict(
+ id='890',
+ description='description',
+ name='name',
+ tenant_id='tenant',
+ rules=rules)
+ sg_group = dict(security_group=raw_group)
+ text = self.default_serializer.serialize(sg_group)
+
+ tree = etree.fromstring(text)
+
+ self._verify_security_group(raw_group, tree)
+
+ def test_groups_serializer(self):
+ rules = [dict(
+ id='123',
+ parent_group_id='1234',
+ ip_protocol='tcp',
+ from_port='12345',
+ to_port='123456',
+ group=dict(name='group1', tenant_id='tenant1'),
+ ip_range=dict(cidr='10.123.0.0/24')),
+ dict(
+ id='234',
+ parent_group_id='2345',
+ ip_protocol='udp',
+ from_port='23456',
+ to_port='234567',
+ group=dict(name='group2', tenant_id='tenant2'),
+ ip_range=dict(cidr='10.234.0.0/24')),
+ dict(
+ id='345',
+ parent_group_id='3456',
+ ip_protocol='tcp',
+ from_port='34567',
+ to_port='345678',
+ group=dict(name='group3', tenant_id='tenant3'),
+ ip_range=dict(cidr='10.345.0.0/24')),
+ dict(
+ id='456',
+ parent_group_id='4567',
+ ip_protocol='udp',
+ from_port='45678',
+ to_port='456789',
+ group=dict(name='group4', tenant_id='tenant4'),
+ ip_range=dict(cidr='10.456.0.0/24'))]
+ groups = [dict(
+ id='567',
+ description='description1',
+ name='name1',
+ tenant_id='tenant1',
+ rules=rules[0:2]),
+ dict(
+ id='678',
+ description='description2',
+ name='name2',
+ tenant_id='tenant2',
+ rules=rules[2:4])]
+ sg_groups = dict(security_groups=groups)
+ text = self.index_serializer.serialize(sg_groups)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('security_groups', self._tag(tree))
+ self.assertEqual(len(groups), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_security_group(groups[idx], child)
+
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get_all(*args, **kwargs):
+ base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
+ 'project_id': 'baz', 'deleted': False, 'deleted_at': None,
+ 'updated_at': None, 'created_at': None}
+ db_list = [
+ fakes.stub_instance(
+ 1, uuid=UUID1,
+ security_groups=[dict(base, **{'name': 'fake-0-0'}),
+ dict(base, **{'name': 'fake-0-1'})]),
+ fakes.stub_instance(
+ 2, uuid=UUID2,
+ security_groups=[dict(base, **{'name': 'fake-1-0'}),
+ dict(base, **{'name': 'fake-1-1'})])
+ ]
+
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list,
+ ['metadata', 'system_metadata',
+ 'security_groups', 'info_cache'])
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3,
+ security_groups=[{'name': 'fake-2-0'},
+ {'name': 'fake-2-1'}])
+ return fake_instance.fake_instance_obj(args[1],
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
+
+
+def fake_compute_create(*args, **kwargs):
+ return ([fake_compute_get(*args, **kwargs)], '')
+
+
+def fake_get_instances_security_groups_bindings(inst, context, servers):
+ groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
+ UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
+ UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
+ result = {}
+ for server in servers:
+ result[server['id']] = groups.get(server['id'])
+ return result
+
+
+class SecurityGroupsOutputTestV21(test.TestCase):
+ base_url = '/v2/fake/servers'
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(SecurityGroupsOutputTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(compute.api.API, 'create', fake_compute_create)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Security_groups'])
+ self.app = self._setup_app()
+
+ def _setup_app(self):
+ return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
+
+ def _make_request(self, url, body=None):
+ req = webob.Request.blank(url)
+ if body:
+ req.method = 'POST'
+ req.body = self._encode_body(body)
+ req.content_type = self.content_type
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(self.app)
+ return res
+
+ def _encode_body(self, body):
+ return jsonutils.dumps(body)
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_groups(self, server):
+ return server.get('security_groups')
+
+ def test_create(self):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ res = self._make_request(self.base_url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_show(self):
+ url = self.base_url + '/' + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_detail(self):
+ url = self.base_url + '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ for j, group in enumerate(self._get_groups(server)):
+ name = 'fake-%s-%s' % (i, j)
+ self.assertEqual(group.get('name'), name)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
+
+ def _setup_app(self):
+ return fakes.wsgi_app(init_only=('servers',))
+
+
+class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTestV2):
+ content_type = 'application/xml'
+
+ class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server', selector='server')
+ root.set('name')
+ root.set('id')
+ root.set('imageRef')
+ root.set('flavorRef')
+ return xmlutil.MasterTemplate(root, 1,
+ nsmap={None: xmlutil.XMLNS_V11})
+
+ def _encode_body(self, body):
+ serializer = self.MinimalCreateServerTemplate()
+ return serializer.serialize(body)
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_groups(self, server):
+ # NOTE(vish): we are adding security groups without an extension
+ # namespace so we don't break people using the existing
+ # functionality, but that means we need to use find with
+ # the existing server namespace.
+ namespace = server.nsmap[None]
+ return server.find('{%s}security_groups' % namespace).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py
new file mode 100644
index 0000000000..535a1afa15
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py
@@ -0,0 +1,132 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute.contrib import server_diagnostics
+from nova.api.openstack import wsgi
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+UUID = 'abc'
+
+
+def fake_get_diagnostics(self, _context, instance_uuid):
+ return {'data': 'Some diagnostic info'}
+
+
+def fake_instance_get(self, _context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ if instance_uuid != UUID:
+ raise Exception("Invalid UUID")
+ return {'uuid': instance_uuid}
+
+
+class ServerDiagnosticsTestV21(test.NoDBTestCase):
+
+ def _setup_router(self):
+ self.router = compute.APIRouterV3(init_only=('servers',
+ 'os-server-diagnostics'))
+
+ def _get_request(self):
+ return fakes.HTTPRequestV3.blank(
+ '/servers/%s/diagnostics' % UUID)
+
+ def setUp(self):
+ super(ServerDiagnosticsTestV21, self).setUp()
+ self._setup_router()
+
+ @mock.patch.object(compute_api.API, 'get_diagnostics',
+ fake_get_diagnostics)
+ @mock.patch.object(compute_api.API, 'get',
+ fake_instance_get)
+ def test_get_diagnostics(self):
+ req = self._get_request()
+ res = req.get_response(self.router)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(output, {'data': 'Some diagnostic info'})
+
+ @mock.patch.object(compute_api.API, 'get_diagnostics',
+ fake_get_diagnostics)
+ @mock.patch.object(compute_api.API, 'get',
+ side_effect=exception.InstanceNotFound(instance_id=UUID))
+ def test_get_diagnostics_with_non_existed_instance(self, mock_get):
+ req = self._get_request()
+ res = req.get_response(self.router)
+ self.assertEqual(res.status_int, 404)
+
+ @mock.patch.object(compute_api.API, 'get_diagnostics',
+ side_effect=exception.InstanceInvalidState('fake message'))
+ @mock.patch.object(compute_api.API, 'get', fake_instance_get)
+ def test_get_diagnostics_raise_conflict_on_invalid_state(self,
+ mock_get_diagnostics):
+ req = self._get_request()
+ res = req.get_response(self.router)
+ self.assertEqual(409, res.status_int)
+
+ @mock.patch.object(compute_api.API, 'get_diagnostics',
+ side_effect=NotImplementedError)
+ @mock.patch.object(compute_api.API, 'get', fake_instance_get)
+ def test_get_diagnostics_raise_no_notimplementederror(self,
+ mock_get_diagnostics):
+ req = self._get_request()
+ res = req.get_response(self.router)
+ self.assertEqual(501, res.status_int)
+
+
+class ServerDiagnosticsTestV2(ServerDiagnosticsTestV21):
+
+ def _setup_router(self):
+ self.flags(verbose=True,
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_diagnostics'])
+
+ self.router = compute.APIRouter(init_only=('servers', 'diagnostics'))
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank(
+ '/fake/servers/%s/diagnostics' % UUID)
+
+
+class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase):
+ namespace = wsgi.XMLNS_V11
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def test_index_serializer(self):
+ serializer = server_diagnostics.ServerDiagnosticsTemplate()
+ exemplar = dict(diag1='foo', diag2='bar')
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('diagnostics', self._tag(tree))
+ self.assertEqual(len(tree), len(exemplar))
+ for child in tree:
+ tag = self._tag(child)
+ self.assertIn(tag, exemplar)
+ self.assertEqual(child.text, exemplar[tag])
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py
new file mode 100644
index 0000000000..61801ba648
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py
@@ -0,0 +1,158 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import server_external_events
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+
+fake_instances = {
+ '00000000-0000-0000-0000-000000000001': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000001', host='host1'),
+ '00000000-0000-0000-0000-000000000002': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000002', host='host1'),
+ '00000000-0000-0000-0000-000000000003': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000003', host='host2'),
+ '00000000-0000-0000-0000-000000000004': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000004', host=None),
+}
+fake_instance_uuids = sorted(fake_instances.keys())
+MISSING_UUID = '00000000-0000-0000-0000-000000000005'
+
+
+@classmethod
+def fake_get_by_uuid(cls, context, uuid):
+ try:
+ return fake_instances[uuid]
+ except KeyError:
+ raise exception.InstanceNotFound(instance_id=uuid)
+
+
+@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
+class ServerExternalEventsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ServerExternalEventsTest, self).setUp()
+ self.api = server_external_events.ServerExternalEventsController()
+ self.context = context.get_admin_context()
+ self.event_1 = {'name': 'network-vif-plugged',
+ 'tag': 'foo',
+ 'server_uuid': fake_instance_uuids[0]}
+ self.event_2 = {'name': 'network-changed',
+ 'server_uuid': fake_instance_uuids[1]}
+ self.default_body = {'events': [self.event_1, self.event_2]}
+ self.resp_event_1 = dict(self.event_1)
+ self.resp_event_1['code'] = 200
+ self.resp_event_1['status'] = 'completed'
+ self.resp_event_2 = dict(self.event_2)
+ self.resp_event_2['code'] = 200
+ self.resp_event_2['status'] = 'completed'
+ self.default_resp_body = {'events': [self.resp_event_1,
+ self.resp_event_2]}
+
+ def _create_req(self, body):
+ req = webob.Request.blank('/v2/fake/os-server-external-events')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ req.body = jsonutils.dumps(body)
+ return req
+
+ def _assert_call(self, req, body, expected_uuids, expected_events):
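+ # Verifies that external_instance_event() was called exactly once and
+ # that the instances and event names passed to the compute API match
+ # the expected sets, independent of ordering.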
+ with mock.patch.object(self.api.compute_api,
+ 'external_instance_event') as api_method:
+ response = self.api.create(req, body)
+
+ result = response.obj
+ code = response._code
+
+ self.assertEqual(1, api_method.call_count)
+ for inst in api_method.call_args_list[0][0][1]:
+ expected_uuids.remove(inst.uuid)
+ self.assertEqual([], expected_uuids)
+ for event in api_method.call_args_list[0][0][2]:
+ expected_events.remove(event.name)
+ self.assertEqual([], expected_events)
+ return result, code
+
+ def test_create(self):
+ req = self._create_req(self.default_body)
+ result, code = self._assert_call(req, self.default_body,
+ fake_instance_uuids[:2],
+ ['network-vif-plugged',
+ 'network-changed'])
+ self.assertEqual(self.default_resp_body, result)
+ self.assertEqual(200, code)
+
+ def test_create_one_bad_instance(self):
+ body = self.default_body
+ body['events'][1]['server_uuid'] = MISSING_UUID
+ req = self._create_req(body)
+ result, code = self._assert_call(req, body, [fake_instance_uuids[0]],
+ ['network-vif-plugged'])
+ self.assertEqual('failed', result['events'][1]['status'])
+ self.assertEqual(200, result['events'][0]['code'])
+ self.assertEqual(404, result['events'][1]['code'])
+ self.assertEqual(207, code)
+
+ def test_create_event_instance_has_no_host(self):
+ body = self.default_body
+ body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
+ req = self._create_req(body)
+ # the instance without host should not be passed to the compute layer
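+ # its event is reported back with code 422 and status 'failed', and
+ # the overall response becomes a 207 multi-status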
+ result, code = self._assert_call(req, body,
+ [fake_instance_uuids[1]],
+ ['network-changed'])
+ self.assertEqual(422, result['events'][0]['code'])
+ self.assertEqual('failed', result['events'][0]['status'])
+ self.assertEqual(200, result['events'][1]['code'])
+ self.assertEqual(207, code)
+
+ def test_create_no_good_instances(self):
+ body = self.default_body
+ body['events'][0]['server_uuid'] = MISSING_UUID
+ body['events'][1]['server_uuid'] = MISSING_UUID
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.api.create, req, body)
+
+ def test_create_bad_status(self):
+ body = self.default_body
+ body['events'][1]['status'] = 'foo'
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_extra_gorp(self):
+ body = self.default_body
+ body['events'][0]['foobar'] = 'bad stuff'
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_bad_events(self):
+ body = {'events': 'foo'}
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_bad_body(self):
+ body = {'foo': 'bar'}
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
new file mode 100644
index 0000000000..9e756cf157
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
@@ -0,0 +1,188 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import server_groups
+from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
+from nova.api.openstack import extensions
+from nova import context
+import nova.db
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def server_group_template(**kwargs):
+ sgroup = kwargs.copy()
+ sgroup.setdefault('name', 'test')
+ return sgroup
+
+
+def server_group_db(sg):
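+    # Build a fake DB-layer record (an AttrDict) from an API-style server
+    # group dict, filling in the bookkeeping fields the DB would set.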
+ attrs = sg.copy()
+ if 'id' in attrs:
+ attrs['uuid'] = attrs.pop('id')
+ if 'policies' in attrs:
+ policies = attrs.pop('policies')
+ attrs['policies'] = policies
+ else:
+ attrs['policies'] = []
+ if 'members' in attrs:
+ members = attrs.pop('members')
+ attrs['members'] = members
+ else:
+ attrs['members'] = []
+ if 'metadata' in attrs:
+ attrs['metadetails'] = attrs.pop('metadata')
+ else:
+ attrs['metadetails'] = {}
+ attrs['deleted'] = 0
+ attrs['deleted_at'] = None
+ attrs['created_at'] = None
+ attrs['updated_at'] = None
+ if 'user_id' not in attrs:
+ attrs['user_id'] = 'user_id'
+ if 'project_id' not in attrs:
+ attrs['project_id'] = 'project_id'
+ attrs['id'] = 7
+
+ return AttrDict(attrs)
+
+
+class ServerGroupQuotasTestV21(test.TestCase):
+
+ def setUp(self):
+ super(ServerGroupQuotasTestV21, self).setUp()
+ self._setup_controller()
+ self.app = self._get_app()
+
+ def _setup_controller(self):
+ self.controller = sg_v3.ServerGroupController()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('os-server-groups',))
+
+ def _get_url(self):
+ return '/v2/fake'
+
+ def _setup_quotas(self):
+ pass
+
+ def _assert_server_groups_in_use(self, project_id, user_id, in_use):
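+        # Check the server_groups quota recorded as in use for the given
+        # project and user.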
+ ctxt = context.get_admin_context()
+ result = quota.QUOTAS.get_user_quotas(ctxt, project_id, user_id)
+ self.assertEqual(result['server_groups']['in_use'], in_use)
+
+ def test_create_server_group_normal(self):
+ self._setup_quotas()
+ req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ res_dict = self.controller.create(req, {'server_group': sgroup})
+ self.assertEqual(res_dict['server_group']['name'], 'test')
+ self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
+ self.assertEqual(res_dict['server_group']['policies'], policies)
+
+ def test_create_server_group_quota_limit(self):
+ self._setup_quotas()
+ req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ # Start by creating as many server groups as we're allowed to.
+ for i in range(CONF.quota_server_groups):
+ self.controller.create(req, {'server_group': sgroup})
+
+ # Then, creating a server group should fail.
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_delete_server_group_by_admin(self):
+ self._setup_quotas()
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
+ res = self.controller.create(req, {'server_group': sgroup})
+ sg_id = res['server_group']['id']
+ context = req.environ['nova.context']
+
+ self._assert_server_groups_in_use(context.project_id,
+ context.user_id, 1)
+
+ # Delete the server group we've just created.
+ req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/%s' % sg_id,
+ use_admin_context=True)
+ self.controller.delete(req, sg_id)
+
+ # Make sure the quota in use has been released.
+ self._assert_server_groups_in_use(context.project_id,
+ context.user_id, 0)
+
+ def test_delete_server_group_by_id(self):
+ self._setup_quotas()
+ sg = server_group_template(id='123')
+ self.called = False
+
+ def server_group_delete(context, id):
+ self.called = True
+
+ def return_server_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return server_group_db(sg)
+
+ self.stubs.Set(nova.db, 'instance_group_delete',
+ server_group_delete)
+ self.stubs.Set(nova.db, 'instance_group_get',
+ return_server_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/123')
+ resp = self.controller.delete(req, '123')
+ self.assertTrue(self.called)
+
+        # NOTE: On v2.1 the HTTP status code is exposed as the wsgi_code
+        # of the API method rather than as status_int on the response.
+ if isinstance(self.controller, sg_v3.ServerGroupController):
+ status_int = self.controller.delete.wsgi_code
+ else:
+ status_int = resp.status_int
+ self.assertEqual(204, status_int)
+
+
+class ServerGroupQuotasTestV2(ServerGroupQuotasTestV21):
+
+ def _setup_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = server_groups.ServerGroupController(self.ext_mgr)
+
+ def _setup_quotas(self):
+ self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes()\
+ .AndReturn(True)
+ self.mox.ReplayAll()
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('os-server-groups',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
new file mode 100644
index 0000000000..7dd2675c9e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
@@ -0,0 +1,521 @@
+# Copyright (c) 2014 Cisco Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import server_groups
+from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import context
+import nova.db
+from nova import exception
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
+FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
+FAKE_UUID3 = 'b8713410-9ba3-e913-901b-13410ca90121'
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def server_group_template(**kwargs):
+ sgroup = kwargs.copy()
+ sgroup.setdefault('name', 'test')
+ return sgroup
+
+
+def server_group_resp_template(**kwargs):
+ sgroup = kwargs.copy()
+ sgroup.setdefault('name', 'test')
+ sgroup.setdefault('policies', [])
+ sgroup.setdefault('members', [])
+ return sgroup
+
+
+def server_group_db(sg):
+ attrs = sg.copy()
+ if 'id' in attrs:
+ attrs['uuid'] = attrs.pop('id')
+ if 'policies' in attrs:
+ policies = attrs.pop('policies')
+ attrs['policies'] = policies
+ else:
+ attrs['policies'] = []
+ if 'members' in attrs:
+ members = attrs.pop('members')
+ attrs['members'] = members
+ else:
+ attrs['members'] = []
+ attrs['deleted'] = 0
+ attrs['deleted_at'] = None
+ attrs['created_at'] = None
+ attrs['updated_at'] = None
+ if 'user_id' not in attrs:
+ attrs['user_id'] = 'user_id'
+ if 'project_id' not in attrs:
+ attrs['project_id'] = 'project_id'
+ attrs['id'] = 7
+
+ return AttrDict(attrs)
+
+
+class ServerGroupTestV21(test.TestCase):
+
+ def setUp(self):
+ super(ServerGroupTestV21, self).setUp()
+ self._setup_controller()
+ self.app = self._get_app()
+
+ def _setup_controller(self):
+ self.controller = sg_v3.ServerGroupController()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('os-server-groups',))
+
+ def _get_url(self):
+ return '/v2/fake'
+
+ def test_create_server_group_with_no_policies(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ sgroup = server_group_template()
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_normal(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ res_dict = self.controller.create(req, {'server_group': sgroup})
+ self.assertEqual(res_dict['server_group']['name'], 'test')
+ self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
+ self.assertEqual(res_dict['server_group']['policies'], policies)
+
+ def _create_instance(self, context):
+ instance = objects.Instance(image_ref=1, node='node1',
+ reservation_id='a', host='host1', project_id='fake',
+ vm_state='fake', system_metadata={'key': 'value'})
+ instance.create(context)
+ return instance
+
+ def _create_instance_group(self, context, members):
+ ig = objects.InstanceGroup(name='fake_name',
+ user_id='fake_user', project_id='fake',
+ members=members)
+ ig.create(context)
+ return ig.uuid
+
+ def _create_groups_and_instances(self, ctx):
+ instances = [self._create_instance(ctx), self._create_instance(ctx)]
+ members = [instance.uuid for instance in instances]
+ ig_uuid = self._create_instance_group(ctx, members)
+ return (ig_uuid, instances, members)
+
+ def test_display_members(self):
+ ctx = context.RequestContext('fake_user', 'fake')
+ (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ res_dict = self.controller.show(req, ig_uuid)
+ result_members = res_dict['server_group']['members']
+ self.assertEqual(2, len(result_members))
+ for member in members:
+ self.assertIn(member, result_members)
+
+ def test_display_active_members_only(self):
+ ctx = context.RequestContext('fake_user', 'fake')
+ (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+
+ # delete an instance
+ instances[1].destroy(ctx)
+ # check that the instance does not exist
+ self.assertRaises(exception.InstanceNotFound,
+ objects.Instance.get_by_uuid,
+ ctx, instances[1].uuid)
+ res_dict = self.controller.show(req, ig_uuid)
+ result_members = res_dict['server_group']['members']
+ # check that only the active instance is displayed
+ self.assertEqual(1, len(result_members))
+ self.assertIn(instances[0].uuid, result_members)
+
+ def test_create_server_group_with_illegal_name(self):
+ # blank name
+ sgroup = server_group_template(name='', policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+        # over-length name (260 characters)
+ sgroup = server_group_template(name='1234567890' * 26,
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # non-string name
+ sgroup = server_group_template(name=12, policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with leading spaces
+ sgroup = server_group_template(name=' leading spaces',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with trailing spaces
+ sgroup = server_group_template(name='trailing space ',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with all spaces
+ sgroup = server_group_template(name=' ',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_illegal_policies(self):
+ # blank policy
+ sgroup = server_group_template(name='fake-name', policies='')
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as integer
+ sgroup = server_group_template(name='fake-name', policies=7)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as string
+ sgroup = server_group_template(name='fake-name', policies='invalid')
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as None
+ sgroup = server_group_template(name='fake-name', policies=None)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_conflicting_policies(self):
+ sgroup = server_group_template()
+ policies = ['anti-affinity', 'affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_duplicate_policies(self):
+ sgroup = server_group_template()
+ policies = ['affinity', 'affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_not_supported(self):
+ sgroup = server_group_template()
+ policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_no_body(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ def test_create_server_group_with_no_server_group(self):
+ body = {'no-instanceGroup': None}
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_list_server_group_by_tenant(self):
+ groups = []
+ policies = ['anti-affinity']
+ members = []
+ metadata = {} # always empty
+ names = ['default-x', 'test']
+ sg1 = server_group_resp_template(id=str(1345),
+ name=names[0],
+ policies=policies,
+ members=members,
+ metadata=metadata)
+ sg2 = server_group_resp_template(id=str(891),
+ name=names[1],
+ policies=policies,
+ members=members,
+ metadata=metadata)
+ groups = [sg1, sg2]
+ expected = {'server_groups': groups}
+
+ def return_server_groups(context, project_id):
+ return [server_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
+ return_server_groups)
+
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, expected)
+
+ def test_list_server_group_all(self):
+ all_groups = []
+ tenant_groups = []
+ policies = ['anti-affinity']
+ members = []
+ metadata = {} # always empty
+ names = ['default-x', 'test']
+ sg1 = server_group_resp_template(id=str(1345),
+ name=names[0],
+ policies=[],
+ members=members,
+ metadata=metadata)
+ sg2 = server_group_resp_template(id=str(891),
+ name=names[1],
+ policies=policies,
+ members=members,
+ metadata={})
+ tenant_groups = [sg2]
+ all_groups = [sg1, sg2]
+
+        expected_all = {'server_groups': all_groups}
+ tenant_specific = {'server_groups': tenant_groups}
+
+ def return_all_server_groups(context):
+ return [server_group_db(sg) for sg in all_groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all',
+ return_all_server_groups)
+
+ def return_tenant_server_groups(context, project_id):
+ return [server_group_db(sg) for sg in tenant_groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
+ return_tenant_server_groups)
+
+ path = self._get_url() + '/os-server-groups?all_projects=True'
+
+ req = fakes.HTTPRequest.blank(path, use_admin_context=True)
+ res_dict = self.controller.index(req)
+        self.assertEqual(res_dict, expected_all)
+ req = fakes.HTTPRequest.blank(path)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, tenant_specific)
+
+ def test_delete_server_group_by_id(self):
+ sg = server_group_template(id='123')
+
+ self.called = False
+
+ def server_group_delete(context, id):
+ self.called = True
+
+ def return_server_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return server_group_db(sg)
+
+ self.stubs.Set(nova.db, 'instance_group_delete',
+ server_group_delete)
+ self.stubs.Set(nova.db, 'instance_group_get',
+ return_server_group)
+
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ '/os-server-groups/123')
+ resp = self.controller.delete(req, '123')
+ self.assertTrue(self.called)
+
+        # NOTE: On v2.1 the HTTP status code is exposed as the wsgi_code
+        # of the API method rather than as status_int on the response.
+ if isinstance(self.controller, sg_v3.ServerGroupController):
+ status_int = self.controller.delete.wsgi_code
+ else:
+ status_int = resp.status_int
+ self.assertEqual(204, status_int)
+
+ def test_delete_non_existing_server_group(self):
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ '/os-server-groups/invalid')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 'invalid')
+
+
+class ServerGroupTestV2(ServerGroupTestV21):
+
+ def _setup_controller(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {}
+ self.controller = server_groups.ServerGroupController(ext_mgr)
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('os-server-groups',))
+
+
+class TestServerGroupXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerGroupXMLDeserializer, self).setUp()
+ self.deserializer = server_groups.ServerGroupXMLDeserializer()
+
+ def test_create_request(self):
+ serial_request = """
+<server_group name="test">
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "name": "test",
+ "policies": []
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_update_request(self):
+ serial_request = """
+<server_group name="test">
+<policies>
+<policy>policy-1</policy>
+<policy>policy-2</policy>
+</policies>
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "name": 'test',
+ "policies": ['policy-1', 'policy-2']
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_request_no_name(self):
+ serial_request = """
+<server_group>
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "policies": []
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class TestServerGroupXMLSerializer(test.TestCase):
+ def setUp(self):
+ super(TestServerGroupXMLSerializer, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.index_serializer = server_groups.ServerGroupsTemplate()
+ self.default_serializer = server_groups.ServerGroupTemplate()
+
+ def _tag(self, elem):
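+        # Strip the '{namespace}' prefix from the element tag, asserting
+        # the namespace is the expected one, and return the bare tag name.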
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def _verify_server_group(self, raw_group, tree):
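+        # Verify the serialized group's id and name attributes and its
+        # policies, members and metadata children against the raw dict.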
+ policies = raw_group['policies']
+ members = raw_group['members']
+ self.assertEqual('server_group', self._tag(tree))
+ self.assertEqual(raw_group['id'], tree.get('id'))
+ self.assertEqual(raw_group['name'], tree.get('name'))
+ self.assertEqual(3, len(tree))
+ for child in tree:
+ child_tag = self._tag(child)
+ if child_tag == 'policies':
+ self.assertEqual(len(policies), len(child))
+ for idx, gr_child in enumerate(child):
+ self.assertEqual(self._tag(gr_child), 'policy')
+ self.assertEqual(policies[idx],
+ gr_child.text)
+ elif child_tag == 'members':
+ self.assertEqual(len(members), len(child))
+ for idx, gr_child in enumerate(child):
+ self.assertEqual(self._tag(gr_child), 'member')
+ self.assertEqual(members[idx],
+ gr_child.text)
+ elif child_tag == 'metadata':
+ self.assertEqual(0, len(child))
+
+ def _verify_server_group_brief(self, raw_group, tree):
+ self.assertEqual('server_group', self._tag(tree))
+ self.assertEqual(raw_group['id'], tree.get('id'))
+ self.assertEqual(raw_group['name'], tree.get('name'))
+
+ def test_group_serializer(self):
+ policies = ["policy-1", "policy-2"]
+ members = ["1", "2"]
+ raw_group = dict(
+ id='890',
+ name='name',
+ policies=policies,
+ members=members)
+ sg_group = dict(server_group=raw_group)
+ text = self.default_serializer.serialize(sg_group)
+
+ tree = etree.fromstring(text)
+
+ self._verify_server_group(raw_group, tree)
+
+ def test_groups_serializer(self):
+ policies = ["policy-1", "policy-2",
+ "policy-3"]
+ members = ["1", "2", "3"]
+ groups = [dict(
+ id='890',
+ name='test',
+ policies=policies[0:2],
+ members=members[0:2]),
+ dict(
+ id='123',
+ name='default',
+ policies=policies[2:],
+ members=members[2:])]
+ sg_groups = dict(server_groups=groups)
+ text = self.index_serializer.serialize(sg_groups)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('server_groups', self._tag(tree))
+ self.assertEqual(len(groups), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_server_group_brief(groups[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py
new file mode 100644
index 0000000000..d29b0480f3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py
@@ -0,0 +1,94 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(
+ compute.api.API, 'get',
+ lambda self, ctxt, *a, **kw:
+ fake_instance.fake_instance_obj(
+ ctxt,
+ system_metadata={},
+ expected_attrs=['system_metadata']))
+ self.password = 'fakepass'
+
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_convert_password(context, password):
+ self.password = password
+ return {}
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'convert_password', fake_convert_password)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_password'])
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
+
+
+class ServerPasswordXmlTest(ServerPasswordTest):
+ content_type = 'application/xml'
+
+ def _get_pass(self, body):
+ # NOTE(vish): first element is password
+ return etree.XML(body).text or ''
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py
new file mode 100644
index 0000000000..6be2a52b86
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2012 Midokura Japan K.K.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import webob
+
+from nova.api.openstack.compute.contrib import server_start_stop \
+ as server_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers \
+ as server_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_instance_get(context, instance_id,
+ columns_to_join=None, use_slave=False):
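+    # Return a stubbed DB instance record (id=1) for the requested uuid.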
+ result = fakes.stub_instance(id=1, uuid=instance_id)
+ result['created_at'] = None
+ result['deleted_at'] = None
+ result['updated_at'] = None
+ result['deleted'] = 0
+ result['info_cache'] = {'network_info': '[]',
+ 'instance_uuid': result['uuid']}
+ return result
+
+
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_start_stop_locked_server(self, context, instance):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
+def fake_start_stop_invalid_state(self, context, instance):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
+class ServerStartStopTestV21(test.TestCase):
+ start_policy = "compute:v3:servers:start"
+ stop_policy = "compute:v3:servers:stop"
+
+ def setUp(self):
+ super(ServerStartStopTestV21, self).setUp()
+ self._setup_controller()
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = server_v21.ServersController(
+ extension_info=ext_info)
+
+ def test_start(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.mox.StubOutWithMock(compute_api.API, 'start')
+ compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.controller._start_server(req, 'test_inst', body)
+
+ def test_start_policy_failed(self):
+ rules = {
+ self.start_policy:
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._start_server,
+ req, 'test_inst', body)
+ self.assertIn(self.start_policy, exc.format_message())
+
+ def test_start_not_ready(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_start_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_start_invalid_state(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_stop(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.mox.StubOutWithMock(compute_api.API, 'stop')
+ compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.controller._stop_server(req, 'test_inst', body)
+
+ def test_stop_policy_failed(self):
+ rules = {
+ self.stop_policy:
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._stop_server,
+ req, 'test_inst', body)
+ self.assertIn(self.stop_policy, exc.format_message())
+
+ def test_stop_not_ready(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_stop_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_stop_invalid_state(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_start_with_bogus_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_stop_with_bogus_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._stop_server, req, 'test_inst', body)
+
+
+class ServerStartStopTestV2(ServerStartStopTestV21):
+ start_policy = "compute:start"
+ stop_policy = "compute:stop"
+
+ def _setup_controller(self):
+ self.controller = server_v2.ServerStartStopActionController()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py
new file mode 100644
index 0000000000..ee0d9a0ef4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py
@@ -0,0 +1,159 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.api.openstack.compute.contrib import server_usage
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
+DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
+DATE3 = datetime.datetime(year=2013, month=4, day=5, hour=14)
+
+
+def fake_compute_get(*args, **kwargs):
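+    # Return one stub instance object with fixed launched/terminated times.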
+ inst = fakes.stub_instance(1, uuid=UUID3, launched_at=DATE1,
+ terminated_at=DATE2)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
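+    # Return an InstanceList built from two stub instances with known
+    # launched/terminated timestamps.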
+ db_list = [
+ fakes.stub_instance(2, uuid=UUID1, launched_at=DATE2,
+ terminated_at=DATE3),
+ fakes.stub_instance(3, uuid=UUID2, launched_at=DATE1,
+ terminated_at=DATE3),
+ ]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ServerUsageTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-SRV-USG:'
+ _prefix = "/v2/fake"
+
+ def setUp(self):
+ super(ServerUsageTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_usage'])
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url):
+ req = fakes.HTTPRequest.blank(url)
+ req.accept = self.content_type
+ res = req.get_response(self._get_app())
+ return res
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers', 'os-server-usage'))
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertServerUsage(self, server, launched_at, terminated_at):
+ resp_launched_at = timeutils.parse_isotime(
+ server.get('%slaunched_at' % self.prefix))
+ self.assertEqual(timeutils.normalize_time(resp_launched_at),
+ launched_at)
+ resp_terminated_at = timeutils.parse_isotime(
+ server.get('%sterminated_at' % self.prefix))
+ self.assertEqual(timeutils.normalize_time(resp_terminated_at),
+ terminated_at)
+
+ def test_show(self):
+ url = self._prefix + ('/servers/%s' % UUID3)
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ self.assertServerUsage(self._get_server(res.body),
+ launched_at=DATE1,
+ terminated_at=DATE2)
+
+ def test_detail(self):
+ url = self._prefix + '/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ servers = self._get_servers(res.body)
+ self.assertServerUsage(servers[0],
+ launched_at=DATE2,
+ terminated_at=DATE3)
+ self.assertServerUsage(servers[1],
+ launched_at=DATE1,
+ terminated_at=DATE3)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self._prefix + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ServerUsageTestV20(ServerUsageTestV21):
+
+ def setUp(self):
+ super(ServerUsageTestV20, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_usage'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',))
+
+
+class ServerUsageXmlTest(ServerUsageTestV20):
+ content_type = 'application/xml'
+ prefix = '{%s}' % server_usage.Server_usage.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_services.py b/nova/tests/unit/api/openstack/compute/contrib/test_services.py
new file mode 100644
index 0000000000..87297c567b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_services.py
@@ -0,0 +1,576 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import calendar
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+import webob.exc
+
+from nova.api.openstack.compute.contrib import services
+from nova.api.openstack import extensions
+from nova import availability_zones
+from nova.compute import cells_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.servicegroup.drivers import db as db_driver
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+
+
+fake_services_list = [
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host1',
+ id=1,
+ disabled=True,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test1'),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host1',
+ id=2,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test2'),
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host2',
+ id=3,
+ disabled=False,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason=None),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host2',
+ id=4,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason='test4'),
+ ]
+
+
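+# Minimal request stubs carrying an admin context and the host/binary
+# query filters exercised by the index() tests.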
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"binary": "nova-compute"}
+
+
+class FakeRequestWithHost(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1"}
+
+
+class FakeRequestWithHostService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1", "binary": "nova-compute"}
+
+
+def fake_service_get_all(services):
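+    # Build a stub for the host API's service_get_all() that serves the
+    # given services, attaching availability zones when requested.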
+ def service_get_all(context, filters=None, set_zones=False):
+ if set_zones or 'availability_zone' in filters:
+ return availability_zones.set_availability_zones(context,
+ services)
+ return services
+ return service_get_all
+
+
+def fake_db_api_service_get_all(context, disabled=None):
+ return fake_services_list
+
+
+def fake_db_service_get_by_host_binary(services):
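+    # Build a stub for db.service_get_by_args that looks a service up by
+    # host and binary, raising HostBinaryNotFound when there is no match.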
+ def service_get_by_host_binary(context, host, binary):
+ for service in services:
+ if service['host'] == host and service['binary'] == binary:
+ return service
+ raise exception.HostBinaryNotFound(host=host, binary=binary)
+ return service_get_by_host_binary
+
+
+def fake_service_get_by_host_binary(context, host, binary):
+ fake = fake_db_service_get_by_host_binary(fake_services_list)
+ return fake(context, host, binary)
+
+
+def _service_get_by_id(services, value):
+ for service in services:
+ if service['id'] == value:
+ return service
+ return None
+
+
+def fake_db_service_update(services):
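+    # Build a stub for db.service_update that raises ServiceNotFound for
+    # unknown ids and otherwise returns the matching fake record.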
+ def service_update(context, service_id, values):
+ service = _service_get_by_id(services, service_id)
+ if service is None:
+ raise exception.ServiceNotFound(service_id=service_id)
+ return service
+ return service_update
+
+
+def fake_service_update(context, service_id, values):
+ fake = fake_db_service_update(fake_services_list)
+ return fake(context, service_id, values)
+
+
+def fake_utcnow():
+ return datetime.datetime(2012, 10, 29, 13, 42, 11)
+
+
+fake_utcnow.override_time = None
+
+
+def fake_utcnow_ts():
+ d = fake_utcnow()
+ return calendar.timegm(d.utctimetuple())
+
+
+class ServicesTest(test.TestCase):
+
+ def setUp(self):
+ super(ServicesTest, self).setUp()
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = services.ServiceController(self.ext_mgr)
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ self.stubs.Set(self.controller.host_api, "service_get_all",
+ fake_service_get_all(fake_services_list))
+
+ self.stubs.Set(db, "service_get_by_args",
+ fake_db_service_get_by_host_binary(fake_services_list))
+ self.stubs.Set(db, "service_update",
+ fake_db_service_update(fake_services_list))
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_service(self):
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_detail(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'disabled_reason': None},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_host(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_service(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_host_service(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_detail_with_delete_extension(self):
+ self.ext_mgr.extensions['os-extended-services-delete'] = True
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ def _service_update(context, service_id, values):
+ self.assertIsNone(values['disabled_reason'])
+ return dict(test_service.fake_service, id=service_id, **values)
+
+ self.stubs.Set(db, "service_update", _service_update)
+
+ body = {'host': 'host1', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+
+ res_dict = self.controller.update(req, "enable", body)
+ self.assertEqual(res_dict['service']['status'], 'enabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_enable_with_invalid_host(self):
+ body = {'host': 'invalid', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ def test_services_enable_with_invalid_binary(self):
+ body = {'host': 'host1', 'binary': 'invalid'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ # This test is just to verify that the servicegroup API gets used when
+ # calling this API.
+ def test_services_with_exception(self):
+ def dummy_is_up(self, dummy):
+ raise KeyError()
+
+ self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
+ req = FakeRequestWithHostService()
+ self.assertRaises(KeyError, self.controller.index, req)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ body = {'host': 'host1', 'binary': 'nova-compute'}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_disable_with_invalid_host(self):
+ body = {'host': 'invalid', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_with_invalid_binary(self):
+ body = {'host': 'host1', 'binary': 'invalid'}
+        req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_log_reason(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = \
+ fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test-reason',
+ }
+ res_dict = self.controller.update(req, "disable-log-reason", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
+
+ def test_mandatory_reason_field(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = \
+ fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ }
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, "disable-log-reason", body)
+
+ def test_invalid_reason_field(self):
+ reason = ' '
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'a' * 256
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'it\'s a valid reason.'
+ self.assertTrue(self.controller._is_valid_as_reason(reason))
+
+ def test_services_delete(self):
+ self.ext_mgr.extensions['os-extended-services-delete'] = True
+
+ request = fakes.HTTPRequest.blank('/v2/fakes/os-services/1',
+ use_admin_context=True)
+ request.method = 'DELETE'
+
+ with mock.patch.object(self.controller.host_api,
+ 'service_delete') as service_delete:
+ self.controller.delete(request, '1')
+ service_delete.assert_called_once_with(
+ request.environ['nova.context'], '1')
+ self.assertEqual(self.controller.delete.wsgi_code, 204)
+
+ def test_services_delete_not_found(self):
+ self.ext_mgr.extensions['os-extended-services-delete'] = True
+
+ request = fakes.HTTPRequest.blank('/v2/fakes/os-services/abc',
+ use_admin_context=True)
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, 'abc')
+
+ def test_services_delete_not_enabled(self):
+ request = fakes.HTTPRequest.blank('/v2/fakes/os-services/300',
+ use_admin_context=True)
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPMethodNotAllowed,
+ self.controller.delete, request, '300')
+
+
+class ServicesCellsTest(test.TestCase):
+ def setUp(self):
+ super(ServicesCellsTest, self).setUp()
+
+ host_api = cells_api.HostAPI()
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = services.ServiceController(self.ext_mgr)
+ self.controller.host_api = host_api
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ services_list = []
+ for service in fake_services_list:
+ service = service.copy()
+ service['id'] = 'cell1@%d' % service['id']
+ services_list.append(service)
+
+ self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
+ fake_service_get_all(services_list))
+
+ def test_services_detail(self):
+ self.ext_mgr.extensions['os-extended-services-delete'] = True
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ utc = iso8601.iso8601.Utc()
+ response = {'services': [
+ {'id': 'cell1@1',
+ 'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
+ tzinfo=utc)},
+ {'id': 'cell1@2',
+ 'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
+ tzinfo=utc)},
+ {'id': 'cell1@3',
+ 'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
+ tzinfo=utc)},
+ {'id': 'cell1@4',
+ 'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
+ tzinfo=utc)}]}
+ self.assertEqual(res_dict, response)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py b/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py
new file mode 100644
index 0000000000..df1c6fc449
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py
@@ -0,0 +1,148 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import webob
+
+from nova.api.openstack.compute.contrib import shelve as shelve_v2
+from nova.api.openstack.compute.plugins.v3 import shelve as shelve_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+def fake_instance_get_by_uuid(context, instance_id,
+ columns_to_join=None, use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id})
+
+
+def fake_auth_context(context):
+ return True
+
+
+class ShelvePolicyTestV21(test.NoDBTestCase):
+ plugin = shelve_v21
+ prefix = 'v3:os-shelve:'
+ offload = 'shelve_offload'
+
+ def setUp(self):
+ super(ShelvePolicyTestV21, self).setUp()
+ self.controller = self.plugin.ShelveController()
+
+ def _fake_request(self):
+ return fakes.HTTPRequestV3.blank('/servers/12/os-shelve')
+
+ def test_shelve_restricted_by_role(self):
+ rules = {'compute_extension:%sshelve' % self.prefix:
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden, self.controller._shelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_shelve_allowed(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:%sshelve' % self.prefix:
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden, self.controller._shelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_shelve_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(self.plugin, 'auth_shelve', fake_auth_context)
+ self.stubs.Set(compute_api.API, 'shelve',
+ fakes.fake_actions_to_locked_server)
+ req = self._fake_request()
+ self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_unshelve_restricted_by_role(self):
+ rules = {'compute_extension:%sunshelve' % self.prefix:
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden, self.controller._unshelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_unshelve_allowed(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:%sunshelve' % self.prefix:
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden, self.controller._unshelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_unshelve_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(self.plugin, 'auth_unshelve', fake_auth_context)
+ self.stubs.Set(compute_api.API, 'unshelve',
+ fakes.fake_actions_to_locked_server)
+ req = self._fake_request()
+ self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
+ req, str(uuid.uuid4()), {})
+
+ def test_shelve_offload_restricted_by_role(self):
+ rules = {'compute_extension:%s%s' % (self.prefix, self.offload):
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden,
+ self.controller._shelve_offload, req, str(uuid.uuid4()), {})
+
+ def test_shelve_offload_allowed(self):
+ rules = {'compute:get': common_policy.parse_rule(''),
+ 'compute_extension:%s%s' % (self.prefix, self.offload):
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = self._fake_request()
+ self.assertRaises(exception.Forbidden,
+ self.controller._shelve_offload, req, str(uuid.uuid4()), {})
+
+ def test_shelve_offload_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(self.plugin, 'auth_shelve_offload', fake_auth_context)
+ self.stubs.Set(compute_api.API, 'shelve_offload',
+ fakes.fake_actions_to_locked_server)
+ req = self._fake_request()
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._shelve_offload,
+ req, str(uuid.uuid4()), {})
+
+
+class ShelvePolicyTestV2(ShelvePolicyTestV21):
+ plugin = shelve_v2
+ prefix = ''
+ offload = 'shelveOffload'
+
+ def _fake_request(self):
+ return fakes.HTTPRequest.blank('/v2/123/servers/12/os-shelve')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py
new file mode 100644
index 0000000000..9639b886ae
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -0,0 +1,539 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.contrib import simple_tenant_usage as \
+ simple_tenant_usage_v2
+from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
+ simple_tenant_usage_v21
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova import utils
+
+SERVERS = 5
+TENANTS = 2
+HOURS = 24
+ROOT_GB = 10
+EPHEMERAL_GB = 20
+MEMORY_MB = 1024
+VCPUS = 2
+NOW = timeutils.utcnow()
+START = NOW - datetime.timedelta(hours=HOURS)
+STOP = NOW
+
+
+FAKE_INST_TYPE = {'id': 1,
+ 'vcpus': VCPUS,
+ 'root_gb': ROOT_GB,
+ 'ephemeral_gb': EPHEMERAL_GB,
+ 'memory_mb': MEMORY_MB,
+ 'name': 'fakeflavor',
+ 'flavorid': 'foo',
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ 'swap': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'disabled': False,
+ 'is_public': True,
+ 'extra_specs': {'foo': 'bar'}}
+
+
+def get_fake_db_instance(start, end, instance_id, tenant_id,
+ vm_state=vm_states.ACTIVE):
+ sys_meta = utils.dict_to_metadata(
+ flavors.save_flavor_info({}, FAKE_INST_TYPE))
+ # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
+ # needed on the db instance for converting it to an object, but we still
+ # need to override system_metadata to use our fake flavor.
+ inst = fakes.stub_instance(
+ id=instance_id,
+ uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
+ image_ref='1',
+ project_id=tenant_id,
+ user_id='fakeuser',
+ display_name='name',
+ flavor_id=FAKE_INST_TYPE['id'],
+ launched_at=start,
+ terminated_at=end,
+ vm_state=vm_state,
+ memory_mb=MEMORY_MB,
+ vcpus=VCPUS,
+ root_gb=ROOT_GB,
+ ephemeral_gb=EPHEMERAL_GB,)
+ inst['system_metadata'] = sys_meta
+ return inst
+
+
+def fake_instance_get_active_by_window_joined(context, begin, end,
+ project_id, host):
+ return [get_fake_db_instance(START,
+ STOP,
+ x,
+ "faketenant_%s" % (x / SERVERS))
+ for x in xrange(TENANTS * SERVERS)]
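+# With TENANTS = 2 and SERVERS = 5 this yields ten instances (ids 0-9);
+# the integer division in "x / SERVERS" (Python 2) buckets ids 0-4 into
+# faketenant_0 and ids 5-9 into faketenant_1.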
+
+
+@mock.patch.object(db, 'instance_get_active_by_window_joined',
+ fake_instance_get_active_by_window_joined)
+class SimpleTenantUsageTestV21(test.TestCase):
+ url = '/v2/faketenant_0/os-simple-tenant-usage'
+ alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
+ policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"
+
+ def setUp(self):
+ super(SimpleTenantUsageTestV21, self).setUp()
+ self.admin_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=True)
+ self.user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_0',
+ is_admin=False)
+ self.alt_user_context = context.RequestContext('fakeadmin_0',
+ 'faketenant_1',
+ is_admin=False)
+
+ def _get_wsgi_app(self, context):
+ return fakes.wsgi_app_v21(fake_auth_context=context,
+ init_only=('servers',
+ 'os-simple-tenant-usage'))
+
+ def _test_verify_index(self, start, stop):
+ req = webob.Request.blank(
+ self.url + '?start=%s&end=%s' %
+ (start.isoformat(), stop.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ usages = res_dict['tenant_usages']
+ for i in xrange(TENANTS):
+ self.assertEqual(int(usages[i]['total_hours']),
+ SERVERS * HOURS)
+ self.assertEqual(int(usages[i]['total_local_gb_usage']),
+ SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
+ self.assertEqual(int(usages[i]['total_memory_mb_usage']),
+ SERVERS * MEMORY_MB * HOURS)
+ self.assertEqual(int(usages[i]['total_vcpus_usage']),
+ SERVERS * VCPUS * HOURS)
+ self.assertFalse(usages[i].get('server_usages'))
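+        # Worked example with the module constants: each tenant reports
+        # SERVERS * HOURS = 5 * 24 = 120 total_hours and
+        # SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS = 5 * 30 * 24 = 3600
+        # GB-hours of total_local_gb_usage.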
+
+ def test_verify_index(self):
+ self._test_verify_index(START, STOP)
+
+ def test_verify_index_future_end_time(self):
+ future = NOW + datetime.timedelta(hours=HOURS)
+ self._test_verify_index(START, future)
+
+ def test_verify_show(self):
+ self._test_verify_show(START, STOP)
+
+ def test_verify_show_future_end_time(self):
+ future = NOW + datetime.timedelta(hours=HOURS)
+ self._test_verify_show(START, future)
+
+ def _get_tenant_usages(self, detailed=''):
+ req = webob.Request.blank(
+ self.url + '?detailed=%s&start=%s&end=%s' %
+ (detailed, START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ return res_dict['tenant_usages']
+
+ def test_verify_detailed_index(self):
+ usages = self._get_tenant_usages('1')
+ for i in xrange(TENANTS):
+ servers = usages[i]['server_usages']
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
+ def test_verify_simple_index(self):
+ usages = self._get_tenant_usages(detailed='0')
+ for i in xrange(TENANTS):
+ self.assertIsNone(usages[i].get('server_usages'))
+
+ def test_verify_simple_index_empty_param(self):
+ # NOTE(lzyeval): 'detailed=&start=..&end=..'
+ usages = self._get_tenant_usages()
+ for i in xrange(TENANTS):
+ self.assertIsNone(usages[i].get('server_usages'))
+
+ def _test_verify_show(self, start, stop):
+ tenant_id = 0
+ req = webob.Request.blank(
+ self.url + '/faketenant_%s?start=%s&end=%s' %
+ (tenant_id, start.isoformat(), stop.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+
+ usage = res_dict['tenant_usage']
+ servers = usage['server_usages']
+ self.assertEqual(len(usage['server_usages']), SERVERS)
+ uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
+ (x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
+ for j in xrange(SERVERS):
+ delta = STOP - START
+ uptime = delta.days * 24 * 3600 + delta.seconds
+ self.assertEqual(int(servers[j]['uptime']), uptime)
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+ self.assertIn(servers[j]['instance_id'], uuids)
+
+ def test_verify_show_cannot_view_other_tenant(self):
+ req = webob.Request.blank(
+ self.alt_url + '/faketenant_0?start=%s&end=%s' %
+ (START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ rules = {
+ self.policy_rule_prefix + ":show":
+ common_policy.parse_rule([
+ ["role:admin"], ["project_id:%(project_id)s"]
+ ])
+ }
+ policy.set_rules(rules)
+
+ try:
+ res = req.get_response(self._get_wsgi_app(self.alt_user_context))
+ self.assertEqual(res.status_int, 403)
+ finally:
+ policy.reset()
+
+ def test_get_tenants_usage_with_bad_start_date(self):
+ future = NOW + datetime.timedelta(hours=HOURS)
+ tenant_id = 0
+ req = webob.Request.blank(
+ self.url + '/'
+ 'faketenant_%s?start=%s&end=%s' %
+ (tenant_id, future.isoformat(), NOW.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_tenants_usage_with_invalid_start_date(self):
+ tenant_id = 0
+ req = webob.Request.blank(
+ self.url + '/'
+ 'faketenant_%s?start=%s&end=%s' %
+ (tenant_id, "xxxx", NOW.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(res.status_int, 400)
+
+ def _test_get_tenants_usage_with_one_date(self, date_url_param):
+ req = webob.Request.blank(
+ self.url + '/'
+ 'faketenant_0?%s' % date_url_param)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(200, res.status_int)
+
+ def test_get_tenants_usage_with_no_start_date(self):
+ self._test_get_tenants_usage_with_one_date(
+ 'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
+
+ def test_get_tenants_usage_with_no_end_date(self):
+ self._test_get_tenants_usage_with_one_date(
+ 'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
+
+
+class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
+ policy_rule_prefix = "compute_extension:simple_tenant_usage"
+
+ def _get_wsgi_app(self, context):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Simple_tenant_usage'])
+ return fakes.wsgi_app(fake_auth_context=context,
+ init_only=('os-simple-tenant-usage', ))
+
+
+class SimpleTenantUsageSerializerTest(test.TestCase):
+ def _verify_server_usage(self, raw_usage, tree):
+ self.assertEqual('server_usage', tree.tag)
+
+ # Figure out what fields we expect
+ not_seen = set(raw_usage.keys())
+
+ for child in tree:
+ self.assertIn(child.tag, not_seen)
+ not_seen.remove(child.tag)
+ self.assertEqual(str(raw_usage[child.tag]), child.text)
+
+ self.assertEqual(len(not_seen), 0)
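+        # The not_seen bookkeeping above checks the element has exactly
+        # the expected children: every key appears once and nothing extra
+        # is serialized.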
+
+ def _verify_tenant_usage(self, raw_usage, tree):
+ self.assertEqual('tenant_usage', tree.tag)
+
+ # Figure out what fields we expect
+ not_seen = set(raw_usage.keys())
+
+ for child in tree:
+ self.assertIn(child.tag, not_seen)
+ not_seen.remove(child.tag)
+ if child.tag == 'server_usages':
+ for idx, gr_child in enumerate(child):
+ self._verify_server_usage(raw_usage['server_usages'][idx],
+ gr_child)
+ else:
+ self.assertEqual(str(raw_usage[child.tag]), child.text)
+
+ self.assertEqual(len(not_seen), 0)
+
+ def test_serializer_show(self):
+ serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
+ today = timeutils.utcnow()
+ yesterday = today - datetime.timedelta(days=1)
+ raw_usage = dict(
+ tenant_id='tenant',
+ total_local_gb_usage=789,
+ total_vcpus_usage=456,
+ total_memory_mb_usage=123,
+ total_hours=24,
+ start=yesterday,
+ stop=today,
+ server_usages=[dict(
+ instance_id='00000000-0000-0000-0000-0000000000000000',
+ name='test',
+ hours=24,
+ memory_mb=1024,
+ local_gb=50,
+ vcpus=1,
+ tenant_id='tenant',
+ flavor='m1.small',
+ started_at=yesterday,
+ ended_at=today,
+ state='terminated',
+ uptime=86400),
+ dict(
+ instance_id='00000000-0000-0000-0000-0000000000000002',
+ name='test2',
+ hours=12,
+ memory_mb=512,
+ local_gb=25,
+ vcpus=2,
+ tenant_id='tenant',
+ flavor='m1.tiny',
+ started_at=yesterday,
+ ended_at=today,
+ state='terminated',
+ uptime=43200),
+ ],
+ )
+ tenant_usage = dict(tenant_usage=raw_usage)
+ text = serializer.serialize(tenant_usage)
+
+ tree = etree.fromstring(text)
+
+ self._verify_tenant_usage(raw_usage, tree)
+
+ def test_serializer_index(self):
+ serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
+ today = timeutils.utcnow()
+ yesterday = today - datetime.timedelta(days=1)
+ raw_usages = [dict(
+ tenant_id='tenant1',
+ total_local_gb_usage=1024,
+ total_vcpus_usage=23,
+ total_memory_mb_usage=512,
+ total_hours=24,
+ start=yesterday,
+ stop=today,
+ server_usages=[dict(
+ instance_id='00000000-0000-0000-0000-0000000000000001',
+ name='test1',
+ hours=24,
+ memory_mb=1024,
+ local_gb=50,
+ vcpus=2,
+ tenant_id='tenant1',
+ flavor='m1.small',
+ started_at=yesterday,
+ ended_at=today,
+ state='terminated',
+ uptime=86400),
+ dict(
+ instance_id='00000000-0000-0000-0000-0000000000000002',
+ name='test2',
+ hours=42,
+ memory_mb=4201,
+ local_gb=25,
+ vcpus=1,
+ tenant_id='tenant1',
+ flavor='m1.tiny',
+ started_at=today,
+ ended_at=yesterday,
+ state='terminated',
+ uptime=43200),
+ ],
+ ),
+ dict(
+ tenant_id='tenant2',
+ total_local_gb_usage=512,
+ total_vcpus_usage=32,
+ total_memory_mb_usage=1024,
+ total_hours=42,
+ start=today,
+ stop=yesterday,
+ server_usages=[dict(
+ instance_id='00000000-0000-0000-0000-0000000000000003',
+ name='test3',
+ hours=24,
+ memory_mb=1024,
+ local_gb=50,
+ vcpus=2,
+ tenant_id='tenant2',
+ flavor='m1.small',
+ started_at=yesterday,
+ ended_at=today,
+ state='terminated',
+ uptime=86400),
+ dict(
+ instance_id='00000000-0000-0000-0000-0000000000000002',
+ name='test2',
+ hours=42,
+ memory_mb=4201,
+ local_gb=25,
+ vcpus=1,
+ tenant_id='tenant4',
+ flavor='m1.tiny',
+ started_at=today,
+ ended_at=yesterday,
+ state='terminated',
+ uptime=43200),
+ ],
+ ),
+ ]
+ tenant_usages = dict(tenant_usages=raw_usages)
+ text = serializer.serialize(tenant_usages)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('tenant_usages', tree.tag)
+ self.assertEqual(len(raw_usages), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_tenant_usage(raw_usages[idx], child)
+
+
+class SimpleTenantUsageControllerTestV21(test.TestCase):
+ controller = simple_tenant_usage_v21.SimpleTenantUsageController()
+
+ def setUp(self):
+ super(SimpleTenantUsageControllerTestV21, self).setUp()
+
+ self.context = context.RequestContext('fakeuser', 'fake-project')
+
+ self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
+ tenant_id=self.context.project_id,
+ vm_state=vm_states.DELETED)
+ # convert the fake instance dict to an object
+ self.inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), self.baseinst)
+
+ def test_get_flavor_from_sys_meta(self):
+ # Non-deleted instances get their type information from their
+ # system_metadata
+ with mock.patch.object(db, 'instance_get_by_uuid',
+ return_value=self.baseinst):
+ flavor = self.controller._get_flavor(self.context,
+ self.inst_obj, {})
+ self.assertEqual(objects.Flavor, type(flavor))
+ self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
+
+ def test_get_flavor_from_non_deleted_with_id_fails(self):
+ # If an instance is not deleted and missing type information from
+ # system_metadata, then that's a bug
+ self.inst_obj.system_metadata = {}
+ self.assertRaises(KeyError,
+ self.controller._get_flavor, self.context,
+ self.inst_obj, {})
+
+ def test_get_flavor_from_deleted_with_id(self):
+ # Deleted instances may not have type info in system_metadata,
+ # so verify that they get their type from a lookup of their
+ # instance_type_id
+ self.inst_obj.system_metadata = {}
+ self.inst_obj.deleted = 1
+ flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
+ self.assertEqual(objects.Flavor, type(flavor))
+ self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
+
+ def test_get_flavor_from_deleted_with_id_of_deleted(self):
+ # Verify the legacy behavior of instance_type_id pointing to a
+ # missing type being non-fatal
+ self.inst_obj.system_metadata = {}
+ self.inst_obj.deleted = 1
+ self.inst_obj.instance_type_id = 99
+ flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
+ self.assertIsNone(flavor)
+
+
+class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
+ controller = simple_tenant_usage_v2.SimpleTenantUsageController()
+
+
+class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
+ simple_tenant_usage = simple_tenant_usage_v21
+
+ def test_valid_string(self):
+ dt = self.simple_tenant_usage.parse_strtime(
+ "2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
+ self.assertEqual(datetime.datetime(
+ microsecond=824060, second=20, minute=47, hour=13,
+ day=21, month=2, year=2014), dt)
+
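+    # parse_strtime is exercised here as if it behaved like
+    # datetime.datetime.strptime for the supplied format (an assumption
+    # about its contract, not its implementation); the invalid case below
+    # fails because the input uses a space where the format expects a
+    # literal 'T'.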
+ def test_invalid_string(self):
+ self.assertRaises(exception.InvalidStrTime,
+ self.simple_tenant_usage.parse_strtime,
+ "2014-02-21 13:47:20.824060",
+ "%Y-%m-%dT%H:%M:%S.%f")
+
+
+class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
+ simple_tenant_usage = simple_tenant_usage_v2
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py
new file mode 100644
index 0000000000..74bb1948e6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py
@@ -0,0 +1,209 @@
+# Copyright 2011 Denali Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.contrib import volumes
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.volume import cinder
+
+
+class SnapshotApiTest(test.NoDBTestCase):
+ def setUp(self):
+ super(SnapshotApiTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ self.stubs.Set(cinder.API, "create_snapshot",
+ fakes.stub_snapshot_create)
+ self.stubs.Set(cinder.API, "create_snapshot_force",
+ fakes.stub_snapshot_create)
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ fakes.stub_snapshot_delete)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
+ self.stubs.Set(cinder.API, "get_all_snapshots",
+ fakes.stub_snapshot_get_all)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Volumes'])
+
+ self.context = context.get_admin_context()
+ self.app = fakes.wsgi_app(init_only=('os-snapshots',))
+
+ def test_snapshot_create(self):
+ snapshot = {"volume_id": 12,
+ "force": False,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"}
+ body = dict(snapshot=snapshot)
+ req = webob.Request.blank('/v2/fake/os-snapshots')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+ resp_dict = jsonutils.loads(resp.body)
+ self.assertIn('snapshot', resp_dict)
+ self.assertEqual(resp_dict['snapshot']['displayName'],
+ snapshot['display_name'])
+ self.assertEqual(resp_dict['snapshot']['displayDescription'],
+ snapshot['display_description'])
+ self.assertEqual(resp_dict['snapshot']['volumeId'],
+ snapshot['volume_id'])
+
+ def test_snapshot_create_force(self):
+ snapshot = {"volume_id": 12,
+ "force": True,
+ "display_name": "Snapshot Test Name",
+ "display_description": "Snapshot Test Desc"}
+ body = dict(snapshot=snapshot)
+ req = webob.Request.blank('/v2/fake/os-snapshots')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = jsonutils.loads(resp.body)
+ self.assertIn('snapshot', resp_dict)
+ self.assertEqual(resp_dict['snapshot']['displayName'],
+ snapshot['display_name'])
+ self.assertEqual(resp_dict['snapshot']['displayDescription'],
+ snapshot['display_description'])
+ self.assertEqual(resp_dict['snapshot']['volumeId'],
+ snapshot['volume_id'])
+
+        # Test an invalid force parameter
+ snapshot = {"volume_id": 12,
+ "force": '**&&^^%%$$##@@'}
+ body = dict(snapshot=snapshot)
+ req = webob.Request.blank('/v2/fake/os-snapshots')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+
+ def test_snapshot_delete(self):
+ snapshot_id = 123
+ req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_snapshot_delete_invalid_id(self):
+ snapshot_id = -1
+ req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
+ req.method = 'DELETE'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 404)
+
+ def test_snapshot_show(self):
+ snapshot_id = 123
+ req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
+ req.method = 'GET'
+ resp = req.get_response(self.app)
+
+ self.assertEqual(resp.status_int, 200)
+ resp_dict = jsonutils.loads(resp.body)
+ self.assertIn('snapshot', resp_dict)
+ self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))
+
+ def test_snapshot_show_invalid_id(self):
+ snapshot_id = -1
+ req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
+ req.method = 'GET'
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 404)
+
+ def test_snapshot_detail(self):
+ req = webob.Request.blank('/v2/fake/os-snapshots/detail')
+ req.method = 'GET'
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = jsonutils.loads(resp.body)
+ self.assertIn('snapshots', resp_dict)
+ resp_snapshots = resp_dict['snapshots']
+ self.assertEqual(len(resp_snapshots), 3)
+
+ resp_snapshot = resp_snapshots.pop()
+ self.assertEqual(resp_snapshot['id'], 102)
+
+
+class SnapshotSerializerTest(test.NoDBTestCase):
+ def _verify_snapshot(self, snap, tree):
+ self.assertEqual(tree.tag, 'snapshot')
+
+ for attr in ('id', 'status', 'size', 'createdAt',
+ 'displayName', 'displayDescription', 'volumeId'):
+ self.assertEqual(str(snap[attr]), tree.get(attr))
+
+ def test_snapshot_show_create_serializer(self):
+ serializer = volumes.SnapshotTemplate()
+ raw_snapshot = dict(
+ id='snap_id',
+ status='snap_status',
+ size=1024,
+ createdAt=timeutils.utcnow(),
+ displayName='snap_name',
+ displayDescription='snap_desc',
+ volumeId='vol_id',
+ )
+ text = serializer.serialize(dict(snapshot=raw_snapshot))
+
+ tree = etree.fromstring(text)
+
+ self._verify_snapshot(raw_snapshot, tree)
+
+ def test_snapshot_index_detail_serializer(self):
+ serializer = volumes.SnapshotsTemplate()
+ raw_snapshots = [dict(
+ id='snap1_id',
+ status='snap1_status',
+ size=1024,
+ createdAt=timeutils.utcnow(),
+ displayName='snap1_name',
+ displayDescription='snap1_desc',
+ volumeId='vol1_id',
+ ),
+ dict(
+ id='snap2_id',
+ status='snap2_status',
+ size=1024,
+ createdAt=timeutils.utcnow(),
+ displayName='snap2_name',
+ displayDescription='snap2_desc',
+ volumeId='vol2_id',
+ )]
+ text = serializer.serialize(dict(snapshots=raw_snapshots))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('snapshots', tree.tag)
+ self.assertEqual(len(raw_snapshots), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_snapshot(raw_snapshots[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py b/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py
new file mode 100644
index 0000000000..30d4da6ba1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py
@@ -0,0 +1,76 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import os_tenant_networks as networks
+from nova.api.openstack.compute.plugins.v3 import tenant_networks \
+ as networks_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class TenantNetworksTestV21(test.NoDBTestCase):
+ ctrlr = networks_v21.TenantNetworkController
+
+ def setUp(self):
+ super(TenantNetworksTestV21, self).setUp()
+ self.controller = self.ctrlr()
+ self.flags(enable_network_quota=True)
+
+ @mock.patch('nova.network.api.API.delete',
+ side_effect=exception.NetworkInUse(network_id=1))
+ def test_network_delete_in_use(self, mock_delete):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks/1')
+
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.delete, req, 1)
+
+ @mock.patch('nova.quota.QUOTAS.reserve')
+ @mock.patch('nova.quota.QUOTAS.rollback')
+ @mock.patch('nova.network.api.API.delete')
+ def _test_network_delete_exception(self, ex, expex, delete_mock,
+ rollback_mock, reserve_mock):
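+        # mock.patch decorators apply bottom-up, so delete_mock patches
+        # nova.network.api.API.delete, rollback_mock patches
+        # QUOTAS.rollback and reserve_mock patches QUOTAS.reserve.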
+ req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
+ ctxt = req.environ['nova.context']
+
+ reserve_mock.return_value = 'rv'
+ delete_mock.side_effect = ex
+
+ self.assertRaises(expex, self.controller.delete, req, 1)
+
+ delete_mock.assert_called_once_with(ctxt, 1)
+ rollback_mock.assert_called_once_with(ctxt, 'rv')
+ reserve_mock.assert_called_once_with(ctxt, networks=-1)
+
+ def test_network_delete_exception_network_not_found(self):
+ ex = exception.NetworkNotFound(network_id=1)
+ expex = webob.exc.HTTPNotFound
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_policy_failed(self):
+ ex = exception.PolicyNotAuthorized(action='dummy')
+ expex = webob.exc.HTTPForbidden
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_network_in_use(self):
+ ex = exception.NetworkInUse(network_id=1)
+ expex = webob.exc.HTTPConflict
+ self._test_network_delete_exception(ex, expex)
+
+
+class TenantNetworksTestV2(TenantNetworksTestV21):
+ ctrlr = networks.NetworkController
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py b/nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py
new file mode 100644
index 0000000000..ee2b0d703b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py
@@ -0,0 +1,306 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.contrib import used_limits as used_limits_v2
+from nova.api.openstack.compute import limits
+from nova.api.openstack.compute.plugins.v3 import used_limits as \
+ used_limits_v21
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+import nova.context
+from nova import exception
+from nova import quota
+from nova import test
+
+
+class FakeRequest(object):
+ def __init__(self, context, reserved=False):
+ self.environ = {'nova.context': context}
+ self.reserved = reserved
+ self.GET = {'reserved': 1} if reserved else {}
+
+
+class UsedLimitsTestCaseV21(test.NoDBTestCase):
+ used_limit_extension = "compute_extension:v3:os-used-limits:used_limits"
+ include_server_group_quotas = True
+
+ def setUp(self):
+ """Run before each test."""
+ super(UsedLimitsTestCaseV21, self).setUp()
+ self._set_up_controller()
+ self.fake_context = nova.context.RequestContext('fake', 'fake')
+
+ def _set_up_controller(self):
+ self.ext_mgr = None
+ self.controller = used_limits_v21.UsedLimitsController()
+ self.mox.StubOutWithMock(used_limits_v21, 'authorize')
+ self.authorize = used_limits_v21.authorize
+
+ def _do_test_used_limits(self, reserved):
+ fake_req = FakeRequest(self.fake_context, reserved=reserved)
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ res = wsgi.ResponseObject(obj)
+ quota_map = {
+ 'totalRAMUsed': 'ram',
+ 'totalCoresUsed': 'cores',
+ 'totalInstancesUsed': 'instances',
+ 'totalFloatingIpsUsed': 'floating_ips',
+ 'totalSecurityGroupsUsed': 'security_groups',
+ 'totalServerGroupsUsed': 'server_groups',
+ }
+ limits = {}
+ expected_abs_limits = []
+ for display_name, q in quota_map.iteritems():
+ limits[q] = {'limit': len(display_name),
+ 'in_use': len(display_name) / 2,
+ 'reserved': len(display_name) / 3}
+ if (self.include_server_group_quotas or
+ display_name != 'totalServerGroupsUsed'):
+ expected_abs_limits.append(display_name)
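+        # Example of the fabricated quotas: 'totalCoresUsed' is 14
+        # characters long, so limit=14, in_use=7 and reserved=4 (integer
+        # division), and the expected usage asserted below is 7, or 7 + 4
+        # when reserved quantities are requested.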
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return limits
+
+ self.stubs.Set(quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(False)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.mox.ReplayAll()
+
+ self.controller.index(fake_req, res)
+ abs_limits = res.obj['limits']['absolute']
+ for limit in expected_abs_limits:
+ value = abs_limits[limit]
+ r = limits[quota_map[limit]]['reserved'] if reserved else 0
+ self.assertEqual(value,
+ limits[quota_map[limit]]['in_use'] + r)
+
+ def test_used_limits_basic(self):
+ self._do_test_used_limits(False)
+
+ def test_used_limits_with_reserved(self):
+ self._do_test_used_limits(True)
+
+ def test_admin_can_fetch_limits_for_a_given_tenant_id(self):
+ project_id = "123456"
+ user_id = "A1234"
+ tenant_id = 'abcd'
+ self.fake_context.project_id = project_id
+ self.fake_context.user_id = user_id
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ target = {
+ "project_id": tenant_id,
+ "user_id": user_id
+ }
+ fake_req = FakeRequest(self.fake_context)
+ fake_req.GET = {'tenant_id': tenant_id}
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(True)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.authorize(self.fake_context, target=target)
+ self.mox.StubOutWithMock(quota.QUOTAS, 'get_project_quotas')
+ quota.QUOTAS.get_project_quotas(self.fake_context, '%s' % tenant_id,
+ usages=True).AndReturn({})
+ self.mox.ReplayAll()
+ res = wsgi.ResponseObject(obj)
+ self.controller.index(fake_req, res)
+
+ def test_admin_can_fetch_used_limits_for_own_project(self):
+ project_id = "123456"
+ user_id = "A1234"
+ self.fake_context.project_id = project_id
+ self.fake_context.user_id = user_id
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ fake_req = FakeRequest(self.fake_context)
+ fake_req.GET = {}
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(True)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.mox.StubOutWithMock(extensions, 'extension_authorizer')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'get_project_quotas')
+ quota.QUOTAS.get_project_quotas(self.fake_context, '%s' % project_id,
+ usages=True).AndReturn({})
+ self.mox.ReplayAll()
+ res = wsgi.ResponseObject(obj)
+ self.controller.index(fake_req, res)
+
+ def test_non_admin_cannot_fetch_used_limits_for_any_other_project(self):
+ project_id = "123456"
+ user_id = "A1234"
+ tenant_id = "abcd"
+ self.fake_context.project_id = project_id
+ self.fake_context.user_id = user_id
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ target = {
+ "project_id": tenant_id,
+ "user_id": user_id
+ }
+ fake_req = FakeRequest(self.fake_context)
+ fake_req.GET = {'tenant_id': tenant_id}
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(True)
+ self.authorize(self.fake_context, target=target). \
+ AndRaise(exception.PolicyNotAuthorized(
+ action=self.used_limit_extension))
+ self.mox.ReplayAll()
+ res = wsgi.ResponseObject(obj)
+ self.assertRaises(exception.PolicyNotAuthorized, self.controller.index,
+ fake_req, res)
+
+ def test_used_limits_fetched_for_context_project_id(self):
+ project_id = "123456"
+ self.fake_context.project_id = project_id
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ fake_req = FakeRequest(self.fake_context)
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(False)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.mox.StubOutWithMock(quota.QUOTAS, 'get_project_quotas')
+ quota.QUOTAS.get_project_quotas(self.fake_context, project_id,
+ usages=True).AndReturn({})
+ self.mox.ReplayAll()
+ res = wsgi.ResponseObject(obj)
+ self.controller.index(fake_req, res)
+
+ def test_used_ram_added(self):
+ fake_req = FakeRequest(self.fake_context)
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ },
+ },
+ }
+ res = wsgi.ResponseObject(obj)
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return {'ram': {'limit': 512, 'in_use': 256}}
+
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(False)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.stubs.Set(quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+ self.mox.ReplayAll()
+
+ self.controller.index(fake_req, res)
+ abs_limits = res.obj['limits']['absolute']
+ self.assertIn('totalRAMUsed', abs_limits)
+ self.assertEqual(abs_limits['totalRAMUsed'], 256)
+
+ def test_no_ram_quota(self):
+ fake_req = FakeRequest(self.fake_context)
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ res = wsgi.ResponseObject(obj)
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return {}
+
+ if self.ext_mgr is not None:
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(False)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(
+ self.include_server_group_quotas)
+ self.stubs.Set(quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+ self.mox.ReplayAll()
+
+ self.controller.index(fake_req, res)
+ abs_limits = res.obj['limits']['absolute']
+ self.assertNotIn('totalRAMUsed', abs_limits)
+
+
+class UsedLimitsTestCaseV2(UsedLimitsTestCaseV21):
+ used_limit_extension = "compute_extension:used_limits_for_admin"
+
+ def _set_up_controller(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = used_limits_v2.UsedLimitsController(self.ext_mgr)
+ self.mox.StubOutWithMock(used_limits_v2, 'authorize_for_admin')
+ self.authorize = used_limits_v2.authorize_for_admin
+
+
+class UsedLimitsTestCaseV2WithoutServerGroupQuotas(UsedLimitsTestCaseV2):
+ used_limit_extension = "compute_extension:used_limits_for_admin"
+ include_server_group_quotas = False
+
+
+class UsedLimitsTestCaseXml(test.NoDBTestCase):
+ def setUp(self):
+ """Run before each test."""
+ super(UsedLimitsTestCaseXml, self).setUp()
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = used_limits_v2.UsedLimitsController(self.ext_mgr)
+ self.fake_context = nova.context.RequestContext('fake', 'fake')
+
+ def test_used_limits_xmlns(self):
+ fake_req = FakeRequest(self.fake_context)
+ obj = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ res = wsgi.ResponseObject(obj, xml=limits.LimitsTemplate)
+ res.preserialize('xml')
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return {}
+
+ self.ext_mgr.is_loaded('os-used-limits-for-admin').AndReturn(False)
+ self.stubs.Set(quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+ self.ext_mgr.is_loaded('os-server-group-quotas').AndReturn(False)
+ self.mox.ReplayAll()
+
+ self.controller.index(fake_req, res)
+ response = res.serialize(None, 'xml')
+ self.assertIn(used_limits_v2.XMLNS, response.body)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py
new file mode 100644
index 0000000000..e8484d61b9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2011 Midokura KK
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import virtual_interfaces
+from nova.api.openstack import wsgi
+from nova import compute
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+def compute_api_get(self, context, instance_id, expected_attrs=None,
+ want_objects=False):
+ return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
+
+
+def get_vifs_by_instance(self, context, instance_id):
+ return [{'uuid': '00000000-0000-0000-0000-00000000000000000',
+ 'address': '00-00-00-00-00-00'},
+ {'uuid': '11111111-1111-1111-1111-11111111111111111',
+ 'address': '11-11-11-11-11-11'}]
+
+
+class FakeRequest(object):
+ def __init__(self, context):
+ self.environ = {'nova.context': context}
+
+
+class ServerVirtualInterfaceTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ServerVirtualInterfaceTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_vifs_by_instance",
+ get_vifs_by_instance)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Virtual_interfaces'])
+
+ def test_get_virtual_interfaces_list(self):
+ url = '/v2/fake/servers/abcd/os-virtual-interfaces'
+ req = webob.Request.blank(url)
+ res = req.get_response(fakes.wsgi_app(
+ init_only=('os-virtual-interfaces',)))
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ response = {'virtual_interfaces': [
+ {'id': '00000000-0000-0000-0000-00000000000000000',
+ 'mac_address': '00-00-00-00-00-00'},
+ {'id': '11111111-1111-1111-1111-11111111111111111',
+ 'mac_address': '11-11-11-11-11-11'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_vif_instance_not_found(self):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ fake_context = context.RequestContext('fake', 'fake')
+ fake_req = FakeRequest(fake_context)
+
+ compute_api.API.get(fake_context, 'fake_uuid',
+ expected_attrs=None,
+ want_objects=True).AndRaise(
+ exception.InstanceNotFound(instance_id='instance-0000'))
+
+ self.mox.ReplayAll()
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ virtual_interfaces.ServerVirtualInterfaceController().index,
+ fake_req, 'fake_uuid')
+
+
+class ServerVirtualInterfaceSerializerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ServerVirtualInterfaceSerializerTest, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.serializer = virtual_interfaces.VirtualInterfaceTemplate()
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def test_serializer(self):
+ raw_vifs = [dict(
+ id='uuid1',
+ mac_address='aa:bb:cc:dd:ee:ff'),
+ dict(
+ id='uuid2',
+ mac_address='bb:aa:dd:cc:ff:ee')]
+ vifs = dict(virtual_interfaces=raw_vifs)
+ text = self.serializer.serialize(vifs)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('virtual_interfaces', self._tag(tree))
+ self.assertEqual(len(raw_vifs), len(tree))
+ for idx, child in enumerate(tree):
+ self.assertEqual('virtual_interface', self._tag(child))
+ self.assertEqual(raw_vifs[idx]['id'], child.get('id'))
+ self.assertEqual(raw_vifs[idx]['mac_address'],
+ child.get('mac_address'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py b/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py
new file mode 100644
index 0000000000..e3c5b8b071
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py
@@ -0,0 +1,1083 @@
+# Copyright 2013 Josh Durgin
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+from webob import exc
+
+from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
+ assisted_snaps
+from nova.api.openstack.compute.contrib import volumes
+from nova.api.openstack.compute.plugins.v3 import volumes as volumes_v3
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.volume import cinder
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
+FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
+FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
+
+IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+
+def fake_get_instance(self, context, instance_id, want_objects=False,
+ expected_attrs=None):
+ return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
+
+
+def fake_get_volume(self, context, id):
+ return {'id': 'woot'}
+
+
+def fake_attach_volume(self, context, instance, volume_id, device):
+ pass
+
+
+def fake_detach_volume(self, context, instance, volume):
+ pass
+
+
+def fake_swap_volume(self, context, instance,
+ old_volume_id, new_volume_id):
+ pass
+
+
+def fake_create_snapshot(self, context, volume, name, description):
+ return {'id': 123,
+ 'volume_id': 'fakeVolId',
+ 'status': 'available',
+ 'volume_size': 123,
+ 'created_at': '2013-01-01 00:00:01',
+ 'display_name': 'myVolumeName',
+ 'display_description': 'myVolumeDescription'}
+
+
+def fake_delete_snapshot(self, context, snapshot_id):
+ pass
+
+
+def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
+ delete_info):
+ pass
+
+
+def fake_compute_volume_snapshot_create(self, context, volume_id,
+ create_info):
+ pass
+
+
+def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'instance_uuid': instance_uuid,
+ 'device_name': '/dev/fake0',
+ 'delete_on_termination': 'False',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': FAKE_UUID_A,
+ 'volume_size': 1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'instance_uuid': instance_uuid,
+ 'device_name': '/dev/fake1',
+ 'delete_on_termination': 'False',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': FAKE_UUID_B,
+ 'volume_size': 1})]
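+# The two fake mappings attach FAKE_UUID_A at /dev/fake0 and FAKE_UUID_B at
+# /dev/fake1; VolumeAttachTests.expected_show below relies on the first of
+# these.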
+
+
+class BootFromVolumeTest(test.TestCase):
+
+ def setUp(self):
+ super(BootFromVolumeTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'create',
+ self._get_fake_compute_api_create())
+ fakes.stub_out_nw_api(self.stubs)
+ self._block_device_mapping_seen = None
+ self._legacy_bdm_seen = True
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
+
+ def _get_fake_compute_api_create(self):
+ def _fake_compute_api_create(cls, context, instance_type,
+ image_href, **kwargs):
+ self._block_device_mapping_seen = kwargs.get(
+ 'block_device_mapping')
+ self._legacy_bdm_seen = kwargs.get('legacy_bdm')
+
+ inst_type = flavors.get_flavor_by_flavor_id(2)
+ resv_id = None
+ return ([{'id': 1,
+ 'display_name': 'test_server',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': IMAGE_UUID,
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
+ 'progress': 0,
+ 'fixed_ips': []
+ }], resv_id)
+ return _fake_compute_api_create
+
+ def test_create_root_volume(self):
+ body = dict(server=dict(
+ name='test_server', imageRef=IMAGE_UUID,
+ flavorRef=2, min_count=1, max_count=1,
+ block_device_mapping=[dict(
+ volume_id=1,
+ device_name='/dev/vda',
+ virtual='root',
+ delete_on_termination=False,
+ )]
+ ))
+ req = webob.Request.blank('/v2/fake/os-volumes_boot')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ init_only=('os-volumes_boot', 'servers')))
+ self.assertEqual(res.status_int, 202)
+ server = jsonutils.loads(res.body)['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+ self.assertEqual(CONF.password_length, len(server['adminPass']))
+ self.assertEqual(len(self._block_device_mapping_seen), 1)
+ self.assertTrue(self._legacy_bdm_seen)
+ self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
+ self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
+ '/dev/vda')
+
+ def test_create_root_volume_bdm_v2(self):
+ body = dict(server=dict(
+ name='test_server', imageRef=IMAGE_UUID,
+ flavorRef=2, min_count=1, max_count=1,
+ block_device_mapping_v2=[dict(
+ source_type='volume',
+ uuid=1,
+ device_name='/dev/vda',
+ boot_index=0,
+ delete_on_termination=False,
+ )]
+ ))
+ req = webob.Request.blank('/v2/fake/os-volumes_boot')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ init_only=('os-volumes_boot', 'servers')))
+ self.assertEqual(res.status_int, 202)
+ server = jsonutils.loads(res.body)['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+ self.assertEqual(CONF.password_length, len(server['adminPass']))
+ self.assertEqual(len(self._block_device_mapping_seen), 1)
+ self.assertFalse(self._legacy_bdm_seen)
+ self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
+ self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
+ 0)
+ self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
+ '/dev/vda')
+
+
+class VolumeApiTestV21(test.TestCase):
+ url_prefix = '/v2/fake'
+
+ def setUp(self):
+ super(VolumeApiTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Volumes'])
+
+ self.context = context.get_admin_context()
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21()
+
+ def test_volume_create(self):
+ self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
+
+ vol = {"size": 100,
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "zone1:host1"}
+ body = {"volume": vol}
+ req = webob.Request.blank(self.url_prefix + '/os-volumes')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(self.app)
+
+ self.assertEqual(resp.status_int, 200)
+
+ resp_dict = jsonutils.loads(resp.body)
+ self.assertIn('volume', resp_dict)
+ self.assertEqual(resp_dict['volume']['size'],
+ vol['size'])
+ self.assertEqual(resp_dict['volume']['displayName'],
+ vol['display_name'])
+ self.assertEqual(resp_dict['volume']['displayDescription'],
+ vol['display_description'])
+ self.assertEqual(resp_dict['volume']['availabilityZone'],
+ vol['availability_zone'])
+
+ def test_volume_create_bad(self):
+ def fake_volume_create(self, context, size, name, description,
+ snapshot, **param):
+ raise exception.InvalidInput(reason="bad request data")
+
+ self.stubs.Set(cinder.API, "create", fake_volume_create)
+
+ vol = {"size": '#$?',
+ "display_name": "Volume Test Name",
+ "display_description": "Volume Test Desc",
+ "availability_zone": "zone1:host1"}
+ body = {"volume": vol}
+
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ volumes.VolumeController().create, req, body)
+
+ def test_volume_index(self):
+ req = webob.Request.blank(self.url_prefix + '/os-volumes')
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+
+ def test_volume_detail(self):
+ req = webob.Request.blank(self.url_prefix + '/os-volumes/detail')
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+
+ def test_volume_show(self):
+ req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 200)
+
+ def test_volume_show_no_volume(self):
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
+
+ req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 404)
+ self.assertIn('Volume 456 could not be found.', resp.body)
+
+ def test_volume_delete(self):
+ req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
+ req.method = 'DELETE'
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+
+ def test_volume_delete_no_volume(self):
+ self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
+
+ req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
+ req.method = 'DELETE'
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 404)
+ self.assertIn('Volume 456 could not be found.', resp.body)
+
+
+class VolumeApiTestV2(VolumeApiTestV21):
+
+ def setUp(self):
+ super(VolumeApiTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Volumes'])
+
+ self.context = context.get_admin_context()
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app()
+
+
+class VolumeAttachTests(test.TestCase):
+ def setUp(self):
+ super(VolumeAttachTests, self).setUp()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+ self.stubs.Set(compute_api.API, 'get', fake_get_instance)
+ self.stubs.Set(cinder.API, 'get', fake_get_volume)
+ self.context = context.get_admin_context()
+ self.expected_show = {'volumeAttachment':
+ {'device': '/dev/fake0',
+ 'serverId': FAKE_UUID,
+ 'id': FAKE_UUID_A,
+ 'volumeId': FAKE_UUID_A
+ }}
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.attachments = volumes.VolumeAttachmentController(self.ext_mgr)
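+        # NOTE: the 'os-volume-attachment-update' extension flag is left
+        # unset here; the swap tests below enable it per test, and
+        # test_swap_volume_no_extension relies on it being absent.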
+
+ def test_show(self):
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
+ self.assertEqual(self.expected_show, result)
+
+ @mock.patch.object(compute_api.API, 'get',
+ side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
+ def test_show_no_instance(self, mock_mr):
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.show,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_A)
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value=None)
+ def test_show_no_bdms(self, mock_mr):
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.show,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_A)
+
+ def test_show_bdms_no_mountpoint(self):
+ FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.show,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_NOTEXIST)
+
+ def test_detach(self):
+ self.stubs.Set(compute_api.API,
+ 'detach_volume',
+ fake_detach_volume)
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
+ self.assertEqual('202 Accepted', result.status)
+
+ def test_detach_vol_not_found(self):
+ self.stubs.Set(compute_api.API,
+ 'detach_volume',
+ fake_detach_volume)
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.delete,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_C)
+
+ @mock.patch('nova.objects.BlockDeviceMapping.is_root',
+ new_callable=mock.PropertyMock)
+ def test_detach_vol_root(self, mock_isroot):
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ mock_isroot.return_value = True
+ self.assertRaises(exc.HTTPForbidden,
+ self.attachments.delete,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_A)
+
+ def test_detach_volume_from_locked_server(self):
+ def fake_detach_volume_from_locked_server(self, context,
+ instance, volume):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+ self.stubs.Set(compute_api.API,
+ 'detach_volume',
+ fake_detach_volume_from_locked_server)
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
+ req, FAKE_UUID, FAKE_UUID_A)
+
+ def test_attach_volume(self):
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume)
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
+ 'device': '/dev/fake'}}
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID, body)
+ self.assertEqual(result['volumeAttachment']['id'],
+ '00000000-aaaa-aaaa-aaaa-000000000000')
+
+ def test_attach_volume_to_locked_server(self):
+ def fake_attach_volume_to_locked_server(self, context, instance,
+ volume_id, device=None):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume_to_locked_server)
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
+ 'device': '/dev/fake'}}
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
+ req, FAKE_UUID, body)
+
+ def test_attach_volume_bad_id(self):
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume)
+
+ body = {
+ 'volumeAttachment': {
+ 'device': None,
+ 'volumeId': 'TESTVOLUME',
+ }
+ }
+
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
+ req, FAKE_UUID, body)
+
+ def test_attach_volume_without_volumeId(self):
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume)
+
+ body = {
+ 'volumeAttachment': {
+ 'device': None
+ }
+ }
+
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
+ req, FAKE_UUID, body)
+
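+    # _test_swap drives VolumeAttachmentController.update(), which ends up
+    # calling compute_api.swap_volume; the tests below exercise it with and
+    # without the 'os-volume-attachment-update' extension enabled.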
+ def _test_swap(self, uuid=FAKE_UUID_A, fake_func=None, body=None):
+ fake_func = fake_func or fake_swap_volume
+ self.stubs.Set(compute_api.API,
+ 'swap_volume',
+ fake_func)
+ body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B,
+ 'device': '/dev/fake'}}
+
+ req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
+ req.method = 'PUT'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ return self.attachments.update(req, FAKE_UUID, uuid, body)
+
+ def test_swap_volume_for_locked_server(self):
+ self.ext_mgr.extensions['os-volume-attachment-update'] = True
+
+ def fake_swap_volume_for_locked_server(self, context, instance,
+ old_volume, new_volume):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
+ fake_func=fake_swap_volume_for_locked_server)
+
+ def test_swap_volume_no_extension(self):
+ self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
+
+ def test_swap_volume(self):
+ self.ext_mgr.extensions['os-volume-attachment-update'] = True
+ result = self._test_swap()
+ self.assertEqual('202 Accepted', result.status)
+
+ def test_swap_volume_no_attachment(self):
+ self.ext_mgr.extensions['os-volume-attachment-update'] = True
+
+ self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C)
+
+ def test_swap_volume_without_volumeId(self):
+ self.ext_mgr.extensions['os-volume-attachment-update'] = True
+ body = {'volumeAttachment': {'device': '/dev/fake'}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_swap,
+ body=body)
+
+
+class VolumeSerializerTest(test.TestCase):
+ def _verify_volume_attachment(self, attach, tree):
+ for attr in ('id', 'volumeId', 'serverId', 'device'):
+ self.assertEqual(str(attach[attr]), tree.get(attr))
+
+ def _verify_volume(self, vol, tree):
+ self.assertEqual(tree.tag, 'volume')
+
+ for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
+ 'displayName', 'displayDescription', 'volumeType',
+ 'snapshotId'):
+ self.assertEqual(str(vol[attr]), tree.get(attr))
+
+ for child in tree:
+ self.assertIn(child.tag, ('attachments', 'metadata'))
+ if child.tag == 'attachments':
+ self.assertEqual(1, len(child))
+ self.assertEqual('attachment', child[0].tag)
+ self._verify_volume_attachment(vol['attachments'][0], child[0])
+ elif child.tag == 'metadata':
+ not_seen = set(vol['metadata'].keys())
+ for gr_child in child:
+ self.assertIn(gr_child.get("key"), not_seen)
+ self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
+ gr_child.text)
+ not_seen.remove(gr_child.get("key"))
+ self.assertEqual(0, len(not_seen))
+
+ def test_attach_show_create_serializer(self):
+ serializer = volumes.VolumeAttachmentTemplate()
+ raw_attach = dict(
+ id='vol_id',
+ volumeId='vol_id',
+ serverId='instance_uuid',
+ device='/foo')
+ text = serializer.serialize(dict(volumeAttachment=raw_attach))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('volumeAttachment', tree.tag)
+ self._verify_volume_attachment(raw_attach, tree)
+
+ def test_attach_index_serializer(self):
+ serializer = volumes.VolumeAttachmentsTemplate()
+ raw_attaches = [dict(
+ id='vol_id1',
+ volumeId='vol_id1',
+ serverId='instance1_uuid',
+ device='/foo1'),
+ dict(
+ id='vol_id2',
+ volumeId='vol_id2',
+ serverId='instance2_uuid',
+ device='/foo2')]
+ text = serializer.serialize(dict(volumeAttachments=raw_attaches))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('volumeAttachments', tree.tag)
+ self.assertEqual(len(raw_attaches), len(tree))
+ for idx, child in enumerate(tree):
+ self.assertEqual('volumeAttachment', child.tag)
+ self._verify_volume_attachment(raw_attaches[idx], child)
+
+ def test_volume_show_create_serializer(self):
+ serializer = volumes.VolumeTemplate()
+ raw_volume = dict(
+ id='vol_id',
+ status='vol_status',
+ size=1024,
+ availabilityZone='vol_availability',
+ createdAt=timeutils.utcnow(),
+ attachments=[dict(
+ id='vol_id',
+ volumeId='vol_id',
+ serverId='instance_uuid',
+ device='/foo')],
+ displayName='vol_name',
+ displayDescription='vol_desc',
+ volumeType='vol_type',
+ snapshotId='snap_id',
+ metadata=dict(
+ foo='bar',
+ baz='quux',
+ ),
+ )
+ text = serializer.serialize(dict(volume=raw_volume))
+
+ tree = etree.fromstring(text)
+
+ self._verify_volume(raw_volume, tree)
+
+ def test_volume_index_detail_serializer(self):
+ serializer = volumes.VolumesTemplate()
+ raw_volumes = [dict(
+ id='vol1_id',
+ status='vol1_status',
+ size=1024,
+ availabilityZone='vol1_availability',
+ createdAt=timeutils.utcnow(),
+ attachments=[dict(
+ id='vol1_id',
+ volumeId='vol1_id',
+ serverId='instance_uuid',
+ device='/foo1')],
+ displayName='vol1_name',
+ displayDescription='vol1_desc',
+ volumeType='vol1_type',
+ snapshotId='snap1_id',
+ metadata=dict(
+ foo='vol1_foo',
+ bar='vol1_bar',
+ ),
+ ),
+ dict(
+ id='vol2_id',
+ status='vol2_status',
+ size=1024,
+ availabilityZone='vol2_availability',
+ createdAt=timeutils.utcnow(),
+ attachments=[dict(
+ id='vol2_id',
+ volumeId='vol2_id',
+ serverId='instance_uuid',
+ device='/foo2')],
+ displayName='vol2_name',
+ displayDescription='vol2_desc',
+ volumeType='vol2_type',
+ snapshotId='snap2_id',
+ metadata=dict(
+ foo='vol2_foo',
+ bar='vol2_bar',
+ ),
+ )]
+ text = serializer.serialize(dict(volumes=raw_volumes))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('volumes', tree.tag)
+ self.assertEqual(len(raw_volumes), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_volume(raw_volumes[idx], child)
+
+
+class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
+ self.deserializer = volumes.CreateDeserializer()
+
+ def test_minimal_volume(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ },
+ }
+ self.assertEqual(request['body'], expected)
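+        # Note that values deserialized from XML attributes stay strings
+        # ('1' rather than the integer 1).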
+
+ def test_display_name(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"
+ display_name="Volume-xml"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_display_description(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_volume_type(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_availability_zone(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
+ availability_zone="us-east1"></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ "availability_zone": "us-east1",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_metadata(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ display_name="Volume-xml"
+ size="1">
+ <metadata><meta key="Type">work</meta></metadata></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "display_name": "Volume-xml",
+ "size": "1",
+ "metadata": {
+ "Type": "work",
+ },
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_full_volume(self):
+ self_request = """
+<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
+ size="1"
+ display_name="Volume-xml"
+ display_description="description"
+ volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
+ availability_zone="us-east1">
+ <metadata><meta key="Type">work</meta></metadata></volume>"""
+ request = self.deserializer.deserialize(self_request)
+ expected = {
+ "volume": {
+ "size": "1",
+ "display_name": "Volume-xml",
+ "display_description": "description",
+ "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
+ "availability_zone": "us-east1",
+ "metadata": {
+ "Type": "work",
+ },
+ },
+ }
+ self.maxDiff = None
+ self.assertEqual(request['body'], expected)
+
+
+class CommonBadRequestTestCase(object):
+    """Tests of places we throw 400 Bad Request from."""
+
+    resource = None
+    entity_name = None
+    controller_cls = None
+    kwargs = {}
+
+ def setUp(self):
+ super(CommonBadRequestTestCase, self).setUp()
+ self.controller = self.controller_cls()
+
+ def _bad_request_create(self, body):
+ req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
+ req.method = 'POST'
+
+ kwargs = self.kwargs.copy()
+ kwargs['body'] = body
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, **kwargs)
+
+ def test_create_no_body(self):
+ self._bad_request_create(body=None)
+
+ def test_create_missing_volume(self):
+ body = {'foo': {'a': 'b'}}
+ self._bad_request_create(body=body)
+
+ def test_create_malformed_entity(self):
+ body = {self.entity_name: 'string'}
+ self._bad_request_create(body=body)
+
+
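+# The subclasses below bind resource/entity_name/controller_cls so the same
+# bad-request checks run against the volume, volume-attachment and snapshot
+# controllers for both v2 and v2.1.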
+class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
+ test.TestCase):
+
+ resource = 'os-volumes'
+ entity_name = 'volume'
+ controller_cls = volumes_v3.VolumeController
+
+
+class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
+ controller_cls = volumes.VolumeController
+
+
+class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
+ test.TestCase):
+ resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
+ entity_name = 'volumeAttachment'
+ controller_cls = volumes.VolumeAttachmentController
+ kwargs = {'server_id': FAKE_UUID}
+
+
+class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
+ test.TestCase):
+
+ resource = 'os-snapshots'
+ entity_name = 'snapshot'
+    controller_cls = volumes_v3.SnapshotController
+
+
+class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
+    controller_cls = volumes.SnapshotController
+
+
+class ShowSnapshotTestCaseV21(test.TestCase):
+ snapshot_cls = volumes_v3.SnapshotController
+
+ def setUp(self):
+ super(ShowSnapshotTestCaseV21, self).setUp()
+ self.controller = self.snapshot_cls()
+ self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
+ self.req.method = 'GET'
+
+ def test_show_snapshot_not_exist(self):
+ def fake_get_snapshot(self, context, id):
+ raise exception.SnapshotNotFound(snapshot_id=id)
+ self.stubs.Set(cinder.API, 'get_snapshot', fake_get_snapshot)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.show, self.req, FAKE_UUID_A)
+
+
+class ShowSnapshotTestCaseV2(ShowSnapshotTestCaseV21):
+ snapshot_cls = volumes.SnapshotController
+
+
+class CreateSnapshotTestCaseV21(test.TestCase):
+ snapshot_cls = volumes_v3.SnapshotController
+
+ def setUp(self):
+ super(CreateSnapshotTestCaseV21, self).setUp()
+ self.controller = self.snapshot_cls()
+ self.stubs.Set(cinder.API, 'get', fake_get_volume)
+ self.stubs.Set(cinder.API, 'create_snapshot_force',
+ fake_create_snapshot)
+ self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
+ self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
+ self.req.method = 'POST'
+ self.body = {'snapshot': {'volume_id': 1}}
+
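+    # 'force' is parsed as a boolean-like string: values such as 'True' and
+    # 'f' are accepted, while anything unrecognisable is rejected with 400.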
+ def test_force_true(self):
+ self.body['snapshot']['force'] = 'True'
+ self.controller.create(self.req, body=self.body)
+
+ def test_force_false(self):
+ self.body['snapshot']['force'] = 'f'
+ self.controller.create(self.req, body=self.body)
+
+ def test_force_invalid(self):
+ self.body['snapshot']['force'] = 'foo'
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+
+class CreateSnapshotTestCaseV2(CreateSnapshotTestCaseV21):
+ snapshot_cls = volumes.SnapshotController
+
+
+class DeleteSnapshotTestCaseV21(test.TestCase):
+ snapshot_cls = volumes_v3.SnapshotController
+
+ def setUp(self):
+ super(DeleteSnapshotTestCaseV21, self).setUp()
+ self.controller = self.snapshot_cls()
+ self.stubs.Set(cinder.API, 'get', fake_get_volume)
+ self.stubs.Set(cinder.API, 'create_snapshot_force',
+ fake_create_snapshot)
+ self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
+ self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot)
+ self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
+
+ def test_normal_delete(self):
+ self.req.method = 'POST'
+ self.body = {'snapshot': {'volume_id': 1}}
+ result = self.controller.create(self.req, body=self.body)
+
+ self.req.method = 'DELETE'
+ result = self.controller.delete(self.req, result['snapshot']['id'])
+
+        # NOTE: on v2.1 the HTTP status code is set as the wsgi_code
+        # attribute of the API method instead of as status_int on a
+        # response object.
+ if isinstance(self.controller, volumes_v3.SnapshotController):
+ status_int = self.controller.delete.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
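+        # A minimal sketch of why this works, assuming the v2.1 controller
+        # declares its status with the wsgi.response decorator (which
+        # stores the code on the method as wsgi_code):
+        #
+        #     @wsgi.response(202)
+        #     def delete(self, req, id):
+        #         ...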
+
+ def test_delete_snapshot_not_exists(self):
+ def fake_delete_snapshot_not_exist(self, context, snapshot_id):
+ raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
+
+ self.stubs.Set(cinder.API, 'delete_snapshot',
+ fake_delete_snapshot_not_exist)
+ self.req.method = 'POST'
+ self.body = {'snapshot': {'volume_id': 1}}
+ result = self.controller.create(self.req, body=self.body)
+
+ self.req.method = 'DELETE'
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete,
+ self.req, result['snapshot']['id'])
+
+
+class DeleteSnapshotTestCaseV2(DeleteSnapshotTestCaseV21):
+ snapshot_cls = volumes.SnapshotController
+
+
+class AssistedSnapshotCreateTestCase(test.TestCase):
+ def setUp(self):
+ super(AssistedSnapshotCreateTestCase, self).setUp()
+
+ self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
+ self.stubs.Set(compute_api.API, 'volume_snapshot_create',
+ fake_compute_volume_snapshot_create)
+
+ def test_assisted_create(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
+ body = {'snapshot': {'volume_id': 1, 'create_info': {}}}
+ req.method = 'POST'
+ self.controller.create(req, body=body)
+
+ def test_assisted_create_missing_create_info(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
+ body = {'snapshot': {'volume_id': 1}}
+ req.method = 'POST'
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body=body)
+
+
+class AssistedSnapshotDeleteTestCase(test.TestCase):
+ def setUp(self):
+ super(AssistedSnapshotDeleteTestCase, self).setUp()
+
+ self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
+ self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
+ fake_compute_volume_snapshot_delete)
+
+ def test_assisted_delete(self):
+ params = {
+ 'delete_info': jsonutils.dumps({'volume_id': 1}),
+ }
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-assisted-volume-snapshots?%s' %
+ '&'.join(['%s=%s' % (k, v) for k, v in params.iteritems()]))
+ req.method = 'DELETE'
+ result = self.controller.delete(req, '5')
+ self.assertEqual(result.status_int, 204)
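+        # delete_info travels JSON-encoded in the query string rather than
+        # in a request body; the next test covers the missing-parameter
+        # case.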
+
+ def test_assisted_delete_missing_delete_info(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
+ req.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, '5')
diff --git a/nova/tests/unit/api/openstack/compute/extensions/__init__.py b/nova/tests/unit/api/openstack/compute/extensions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/extensions/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py
new file mode 100644
index 0000000000..7d1e273ea7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py
@@ -0,0 +1,92 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+
+
+class FoxInSocksController(object):
+
+ def index(self, req):
+ return "Try to say this Mr. Knox, sir..."
+
+
+class FoxInSocksServerControllerExtension(wsgi.Controller):
+ @wsgi.action('add_tweedle')
+ def _add_tweedle(self, req, id, body):
+
+ return "Tweedle Beetle Added."
+
+ @wsgi.action('delete_tweedle')
+ def _delete_tweedle(self, req, id, body):
+
+ return "Tweedle Beetle Deleted."
+
+ @wsgi.action('fail')
+ def _fail(self, req, id, body):
+
+ raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
+
+
+class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+        # NOTE: This only handles JSON responses.
+        # You can use the content-type header to test for XML.
+ resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
+
+
+class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+        # NOTE: This only handles JSON responses.
+        # You can use the content-type header to test for XML.
+ resp_obj.obj['big_bands'] = 'Pig Bands!'
+
+
+class Foxinsocks(extensions.ExtensionDescriptor):
+ """The Fox In Socks Extension."""
+
+ name = "Fox In Socks"
+ alias = "FOXNSOX"
+ namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
+ updated = "2011-01-22T13:25:27-06:00"
+
+ def __init__(self, ext_mgr):
+ ext_mgr.register(self)
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('foxnsocks',
+ FoxInSocksController())
+ resources.append(resource)
+ return resources
+
+ def get_controller_extensions(self):
+ extension_list = []
+
+ extension_set = [
+ (FoxInSocksServerControllerExtension, 'servers'),
+ (FoxInSocksFlavorGooseControllerExtension, 'flavors'),
+ (FoxInSocksFlavorBandsControllerExtension, 'flavors'),
+ ]
+ for klass, collection in extension_set:
+ controller = klass()
+ ext = extensions.ControllerExtension(self, collection, controller)
+ extension_list.append(ext)
+
+ return extension_list
diff --git a/nova/tests/unit/api/openstack/compute/plugins/__init__.py b/nova/tests/unit/api/openstack/compute/plugins/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py b/nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py b/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py
new file mode 100644
index 0000000000..ce99d1069b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py
@@ -0,0 +1,263 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.compute import vm_states
+import nova.context
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+
+
+class CommonMixin(object):
+ def setUp(self):
+ super(CommonMixin, self).setUp()
+ self.compute_api = None
+ self.context = nova.context.RequestContext('fake', 'fake')
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank('/v2/fake' + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.context,
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
+ task_state=None, launched_at=timeutils.utcnow())
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndReturn(instance)
+ return instance
+
+ def _stub_instance_get_failure(self, exc_info, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndRaise(exc_info)
+ return uuid
+
+ def _test_non_existing_instance(self, action, body_map=None):
+ uuid = uuidutils.generate_uuid()
+ self._stub_instance_get_failure(
+ exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % uuid,
+ {action: body_map.get(action)})
+ self.assertEqual(404, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_action(self, action, body=None, method=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+
+ compute_api_args_map = compute_api_args_map or {}
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ getattr(self.compute_api, method)(self.context, instance, *args,
+ **kwargs)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {action: body})
+ self.assertEqual(202, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_not_implemented_state(self, action, method=None):
+ if method is None:
+ method = action
+
+ instance = self._stub_instance_get()
+ body = {}
+ compute_api_args_map = {}
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ NotImplementedError())
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {action: body})
+ self.assertEqual(501, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_invalid_state(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceInvalidState(
+ attr='vm_state', instance_uuid=instance.uuid,
+ state='foo', method=method))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ self.assertIn("Cannot \'%(action)s\' instance %(id)s"
+ % {'action': action, 'id': instance.uuid}, res.body)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_locked_instance(self, action, method=None, body=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+
+ compute_api_args_map = compute_api_args_map or {}
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ getattr(self.compute_api, method)(self.context, instance, *args,
+ **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance.uuid))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {action: body})
+ self.assertEqual(409, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_instance_not_found_in_compute_api(self, action,
+ method=None, body=None, compute_api_args_map=None):
+ if method is None:
+ method = action
+
+ compute_api_args_map = compute_api_args_map or {}
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ getattr(self.compute_api, method)(self.context, instance, *args,
+ **kwargs).AndRaise(
+ exception.InstanceNotFound(instance_id=instance.uuid))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {action: body})
+ self.assertEqual(404, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+
+class CommonTests(CommonMixin, test.NoDBTestCase):
+ def _test_actions(self, actions, method_translations=None, body_map=None,
+ args_map=None):
+ method_translations = method_translations or {}
+ body_map = body_map or {}
+ args_map = args_map or {}
+ for action in actions:
+ method = method_translations.get(action)
+ body = body_map.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_action(action, method=method, body=body,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_actions_instance_not_found_in_compute_api(self,
+ actions, method_translations=None, body_map=None,
+ args_map=None):
+ method_translations = method_translations or {}
+ body_map = body_map or {}
+ args_map = args_map or {}
+ for action in actions:
+ method = method_translations.get(action)
+ body = body_map.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_instance_not_found_in_compute_api(
+ action, method=method, body=body,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_actions_with_non_existed_instance(self, actions, body_map=None):
+ body_map = body_map or {}
+ for action in actions:
+ self._test_non_existing_instance(action,
+ body_map=body_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_actions_raise_conflict_on_invalid_state(
+ self, actions, method_translations=None, body_map=None,
+ args_map=None):
+ method_translations = method_translations or {}
+ body_map = body_map or {}
+ args_map = args_map or {}
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_invalid_state(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_actions_with_locked_instance(self, actions,
+ method_translations=None,
+ body_map=None, args_map=None):
+ method_translations = method_translations or {}
+ body_map = body_map or {}
+ args_map = args_map or {}
+ for action in actions:
+ method = method_translations.get(action)
+ body = body_map.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_locked_instance(action, method=method, body=body,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py
new file mode 100644
index 0000000000..44c1d5b5cd
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py
@@ -0,0 +1,383 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import access_ips
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack import wsgi
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+
+
+class AccessIPsExtTest(test.NoDBTestCase):
+ def setUp(self):
+ super(AccessIPsExtTest, self).setUp()
+ self.access_ips_ext = access_ips.AccessIPs(None)
+
+ def _test(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1',
+ access_ips.AccessIPs.v6_key: 'fe80::'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'})
+
+ def _test_with_ipv4_only(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1'})
+
+ def _test_with_ipv6_only(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: 'fe80::'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': 'fe80::'})
+
+ def _test_without_ipv4_and_ipv6(self, func):
+ server_dict = {}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {})
+
+ def _test_with_ipv4_null(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: None}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': None})
+
+ def _test_with_ipv6_null(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: None}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': None})
+
+ def _test_with_ipv4_blank(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: ''}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': None})
+
+ def _test_with_ipv6_blank(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: ''}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': None})
+
+ def test_server_create(self):
+ self._test(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_create)
+
+ def test_server_create_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_create)
+
+ def test_server_update(self):
+ self._test(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_update)
+
+ def test_server_update_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_update)
+
+ def test_server_rebuild(self):
+ self._test(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_rebuild)
+
+
+class AccessIPsExtAPIValidationTest(test.TestCase):
+ def setUp(self):
+ super(AccessIPsExtAPIValidationTest, self).setUp()
+
+ def fake_save(context, **kwargs):
+ pass
+
+ def fake_rebuild(*args, **kwargs):
+ pass
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
+ self.stubs.Set(instance_obj.Instance, 'save', fake_save)
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ def _test_create(self, params):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ },
+ }
+ body['server'].update(params)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller.create(req, body=body)
+
+ def _test_update(self, params):
+ body = {
+ 'server': {
+ },
+ }
+ body['server'].update(params)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'PUT'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller.update(req, fakes.FAKE_UUID, body=body)
+
+ def _test_rebuild(self, params):
+ body = {
+ 'rebuild': {
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ },
+ }
+ body['rebuild'].update(params)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'PUT'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller._action_rebuild(req, fakes.FAKE_UUID, body=body)
+
+ def test_create_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_create(params)
+
+ def test_create_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_create, params)
+
+ def test_create_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_create(params)
+
+ def test_create_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_create, params)
+
+ def test_update_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_update(params)
+
+ def test_update_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_update, params)
+
+ def test_update_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_update(params)
+
+ def test_update_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_update, params)
+
+ def test_rebuild_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_rebuild(params)
+
+ def test_rebuild_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_rebuild,
+ params)
+
+ def test_rebuild_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_rebuild(params)
+
+ def test_rebuild_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_rebuild,
+ params)
+
+
+class AccessIPsControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(AccessIPsControllerTest, self).setUp()
+ self.controller = access_ips.AccessIPsController()
+
+ def _test_with_access_ips(self, func, kwargs={'id': 'fake'}):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance = {'uuid': 'fake',
+ 'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'}
+ req.cache_db_instance(instance)
+ resp_obj = wsgi.ResponseObject(
+ {"server": {'id': 'fake'}})
+ func(req, resp_obj, **kwargs)
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
+ '1.1.1.1')
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
+ 'fe80::')
+
+ def _test_without_access_ips(self, func, kwargs={'id': 'fake'}):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance = {'uuid': 'fake',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ req.cache_db_instance(instance)
+ resp_obj = wsgi.ResponseObject(
+ {"server": {'id': 'fake'}})
+ func(req, resp_obj, **kwargs)
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
+ '')
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
+ '')
+
+ def test_create(self):
+ self._test_with_access_ips(self.controller.create, {'body': {}})
+
+ def test_create_without_access_ips(self):
+        self._test_without_access_ips(self.controller.create, {'body': {}})
+
+ def test_show(self):
+ self._test_with_access_ips(self.controller.show)
+
+ def test_show_without_access_ips(self):
+ self._test_without_access_ips(self.controller.show)
+
+ def test_detail(self):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance1 = {'uuid': 'fake1',
+ 'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'}
+ instance2 = {'uuid': 'fake2',
+ 'access_ip_v4': '1.1.1.2',
+ 'access_ip_v6': 'fe81::'}
+ req.cache_db_instance(instance1)
+ req.cache_db_instance(instance2)
+ resp_obj = wsgi.ResponseObject(
+ {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
+ self.controller.detail(req, resp_obj)
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key],
+ '1.1.1.1')
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key],
+ 'fe80::')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key],
+ '1.1.1.2')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key],
+ 'fe81::')
+
+ def test_detail_without_access_ips(self):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance1 = {'uuid': 'fake1',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ instance2 = {'uuid': 'fake2',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ req.cache_db_instance(instance1)
+ req.cache_db_instance(instance2)
+ resp_obj = wsgi.ResponseObject(
+ {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
+ self.controller.detail(req, resp_obj)
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key], '')
+
+ def test_update(self):
+ self._test_with_access_ips(self.controller.update, {'id': 'fake',
+ 'body': {}})
+
+ def test_update_without_access_ips(self):
+ self._test_without_access_ips(self.controller.update, {'id': 'fake',
+ 'body': {}})
+
+ def test_rebuild(self):
+ self._test_with_access_ips(self.controller.rebuild, {'id': 'fake',
+ 'body': {}})
+
+ def test_rebuild_without_access_ips(self):
+ self._test_without_access_ips(self.controller.rebuild, {'id': 'fake',
+ 'body': {}})
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
new file mode 100644
index 0000000000..259906c535
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
@@ -0,0 +1,95 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.consoleauth import rpcapi as consoleauth_rpcapi
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path': 'fake_access_path',
+ 'console_type': 'rdp-html5'}
+
+
+def _fake_check_token(self, context, token):
+ return _FAKE_CONNECT_INFO
+
+
+def _fake_check_token_not_found(self, context, token):
+ return None
+
+
+def _fake_check_token_unauthorized(self, context, token):
+    connect_info = dict(_FAKE_CONNECT_INFO)
+ connect_info['console_type'] = 'unauthorized_console_type'
+ return connect_info
+
+
+class ConsoleAuthTokensExtensionTest(test.TestCase):
+
+ _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
+
+ _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path':
+ 'fake_access_path'}}
+
+ def setUp(self):
+ super(ConsoleAuthTokensExtensionTest, self).setUp()
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token)
+
+ ctxt = self._get_admin_context()
+        self.app = fakes.wsgi_app_v21(init_only=('os-console-auth-tokens',),
+ fake_auth_context=ctxt)
+
+ def _get_admin_context(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ return ctxt
+
+ def _create_request(self):
+ req = fakes.HTTPRequestV3.blank(self._FAKE_URL)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ return req
+
+ def test_get_console_connect_info(self):
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_int)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(self._EXPECTED_OUTPUT, output)
+
+ def test_get_console_connect_info_token_not_found(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_not_found)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(404, res.status_int)
+
+ def test_get_console_connect_info_unauthorized_console_type(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_unauthorized)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(401, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py
new file mode 100644
index 0000000000..d3ba83dcbc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py
@@ -0,0 +1,270 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid as stdlib_uuid
+
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import consoles
+from nova.compute import vm_states
+from nova import console
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+class FakeInstanceDB(object):
+
+ def __init__(self):
+ self.instances_by_id = {}
+ self.ids_by_uuid = {}
+ self.max_id = 0
+
+ def return_server_by_id(self, context, id):
+ if id not in self.instances_by_id:
+ self._add_server(id=id)
+ return dict(self.instances_by_id[id])
+
+ def return_server_by_uuid(self, context, uuid):
+ if uuid not in self.ids_by_uuid:
+ self._add_server(uuid=uuid)
+ return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
+
+ def _add_server(self, id=None, uuid=None):
+ if id is None:
+ id = self.max_id + 1
+ if uuid is None:
+ uuid = str(stdlib_uuid.uuid4())
+ instance = stub_instance(id, uuid=uuid)
+ self.instances_by_id[id] = instance
+ self.ids_by_uuid[uuid] = id
+ if id > self.max_id:
+ self.max_id = id
+
+
+def stub_instance(id, user_id='fake', project_id='fake', host=None,
+ vm_state=None, task_state=None,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", name=None, key_name='',
+ access_ipv4=None, access_ipv6=None, progress=0):
+
+ if host is not None:
+ host = str(host)
+
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
+ # ReservationID isn't sent back, hack it in there.
+ server_name = name or "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
+ instance = {
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "admin_password": "",
+ "user_id": user_id,
+ "project_id": project_id,
+ "image_ref": image_ref,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": key_name,
+ "key_data": key_data,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
+ "memory_mb": 0,
+ "vcpus": 0,
+ "root_gb": 0,
+ "hostname": "",
+ "host": host,
+ "instance_type": {},
+ "user_data": "",
+ "reservation_id": reservation_id,
+ "mac_address": "",
+ "scheduled_at": timeutils.utcnow(),
+ "launched_at": timeutils.utcnow(),
+ "terminated_at": timeutils.utcnow(),
+ "availability_zone": "",
+ "display_name": server_name,
+ "display_description": "",
+ "locked": False,
+ "metadata": [],
+ "access_ip_v4": access_ipv4,
+ "access_ip_v6": access_ipv6,
+ "uuid": uuid,
+ "progress": progress}
+
+ return instance
+
+
+class ConsolesControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ConsolesControllerTest, self).setUp()
+ self.flags(verbose=True)
+ self.instance_db = FakeInstanceDB()
+ self.stubs.Set(db, 'instance_get',
+ self.instance_db.return_server_by_id)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ self.instance_db.return_server_by_uuid)
+ self.uuid = str(stdlib_uuid.uuid4())
+ self.url = '/v3/fake/servers/%s/consoles' % self.uuid
+ self.controller = consoles.ConsolesController()
+
+ def test_create_console(self):
+ def fake_create_console(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+ return {}
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller.create(req, self.uuid, None)
+ self.assertEqual(self.controller.create.wsgi_code, 201)
+
+ def test_create_console_unknown_instance(self):
+ def fake_create_console(cons_self, context, instance_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, self.uuid, None)
+
+ def test_show_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool, instance_name='inst-0001')
+
+ expected = {'console': {'id': 20,
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'password': 'fake_password',
+ 'instance_name': 'inst-0001',
+ 'console_type': 'fake_type'}}
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ res_dict = self.controller.show(req, self.uuid, '20')
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_show_console_unknown_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_show_console_unknown_instance(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFoundForInstance(
+ instance_uuid=instance_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_list_consoles(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+
+ pool1 = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ cons1 = dict(id=10, password='fake_password',
+ port='fake_port', pool=pool1)
+ pool2 = dict(console_type='fake_type2',
+ public_hostname='fake_hostname2')
+ cons2 = dict(id=11, password='fake_password2',
+ port='fake_port2', pool=pool2)
+ return [cons1, cons2]
+
+ expected = {'consoles':
+ [{'id': 10, 'console_type': 'fake_type'},
+ {'id': 11, 'console_type': 'fake_type2'}]}
+
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ res_dict = self.controller.index(req, self.uuid)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_list_consoles_unknown_instance(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
+ req, self.uuid)
+
+ def test_delete_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool)
+
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.controller.delete(req, self.uuid, '20')
+
+ def test_delete_console_unknown_console(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+ def test_delete_console_unknown_instance(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFoundForInstance(
+ instance_uuid=instance_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py
new file mode 100644
index 0000000000..83701090f8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py
@@ -0,0 +1,261 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.plugins.v3 import create_backup
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class CreateBackupTests(admin_only_action_common.CommonMixin,
+ test.NoDBTestCase):
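+    """Tests for the createBackup action (os-create-backup plugin)."""
+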
+ def setUp(self):
+ super(CreateBackupTests, self).setUp()
+ self.controller = create_backup.CreateBackupController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(create_backup, 'CreateBackupController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-create-backup'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+ self.mox.StubOutWithMock(common,
+ 'check_img_metadata_properties_quota')
+ self.mox.StubOutWithMock(self.compute_api, 'backup')
+
+ def _make_url(self, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ return '/servers/%s/action' % uuid
+
+ def test_create_backup_with_metadata(self):
+ metadata = {'123': 'asdf'}
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': metadata,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties=metadata)
+
+ common.check_img_metadata_properties_quota(self.context, metadata)
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties=metadata).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ # Name is required for backups.
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_rotation(self):
+ # Rotation is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation(self):
+ """Rotation must be greater than or equal to zero
+ for backup requests
+ """
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': -1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation_with_string_number(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': '-1',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ # Backup Type (daily or weekly) is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_non_dict_metadata(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': 'non_dict',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_bad_entity(self):
+ body = {'createBackup': 'go'}
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_rotation_is_zero(self):
+ # The happy path for creating backups if rotation is zero.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 0,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 0,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertNotIn('Location', res.headers)
+
+ def test_create_backup_rotation_is_positive(self):
+ # The happy path for creating backups if rotation is positive.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_rotation_is_string_number(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': '1',
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_raises_conflict_on_invalid_state(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ args_map = {
+ 'createBackup': (
+ ('Backup 1', 'daily', 1), {'extra_properties': {}}
+ ),
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_invalid_state('createBackup', method='backup',
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_create_backup_with_non_existed_instance(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_non_existing_instance('createBackup',
+ body_map=body_map)
+
+ def test_create_backup_with_invalid_create_backup(self):
+ body = {
+ 'createBackupup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py
new file mode 100644
index 0000000000..dc6dd2898f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py
@@ -0,0 +1,387 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import extended_volumes
+from nova import compute
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova import volume
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID1)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=UUID1)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+def fake_bdms_get_all_by_instance(*args, **kwargs):
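+    # Two volume-backed block device mappings, for UUID1 and UUID2.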
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID1, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID2, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 2})]
+
+
+def fake_attach_volume(self, context, instance, volume_id,
+ device, disk_bus, device_type):
+ pass
+
+
+def fake_attach_volume_not_found_vol(self, context, instance, volume_id,
+ device, disk_bus, device_type):
+ raise exception.VolumeNotFound(volume_id=volume_id)
+
+
+def fake_attach_volume_invalid_device_path(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InvalidDevicePath(path=device)
+
+
+def fake_attach_volume_instance_invalid_state(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
+ method='', attr='')
+
+
+def fake_attach_volume_invalid_volume(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InvalidVolume(reason='')
+
+
+def fake_detach_volume(self, context, instance, volume):
+ pass
+
+
+def fake_swap_volume(self, context, instance,
+ old_volume_id, new_volume_id):
+ pass
+
+
+def fake_swap_volume_invalid_volume(self, context, instance,
+ volume_id, device):
+ raise exception.InvalidVolume(reason='', volume_id=volume_id)
+
+
+def fake_swap_volume_unattached_volume(self, context, instance,
+ volume_id, device):
+ raise exception.VolumeUnattached(reason='', volume_id=volume_id)
+
+
+def fake_detach_volume_invalid_volume(self, context, instance, volume):
+ raise exception.InvalidVolume(reason='')
+
+
+def fake_swap_volume_instance_invalid_state(self, context, instance,
+ volume_id, device):
+ raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
+ method='', attr='')
+
+
+def fake_volume_get(*args, **kwargs):
+ pass
+
+
+def fake_volume_get_not_found(*args, **kwargs):
+ raise exception.VolumeNotFound(volume_id=UUID1)
+
+
+class ExtendedVolumesTest(test.TestCase):
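+    """Tests for the os-extended-volumes attach/detach/swap actions."""
+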
+ content_type = 'application/json'
+ prefix = 'os-extended-volumes:'
+
+ def setUp(self):
+ super(ExtendedVolumesTest, self).setUp()
+ self.Controller = extended_volumes.ExtendedVolumesController()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume)
+ self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume)
+ self.app = fakes.wsgi_app_v21(init_only=('os-extended-volumes',
+ 'servers'))
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url, body=None):
+ base_url = '/v2/fake/servers'
+ req = webob.Request.blank(base_url + url)
+ req.headers['Accept'] = self.content_type
+ if body:
+ req.body = jsonutils.dumps(body)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ res = req.get_response(self.app)
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def test_show(self):
+ url = '/%s' % UUID1
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ server = self._get_server(res.body)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ self.assertEqual(exp_volumes, actual)
+
+ def test_detail(self):
+ url = '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ for i, server in enumerate(self._get_servers(res.body)):
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ self.assertEqual(exp_volumes, actual)
+
+ def test_detach(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 202)
+
+ def test_detach_volume_from_locked_server(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'detach_volume',
+ fakes.fake_actions_to_locked_server)
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_detach_with_non_existed_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_detach_with_non_existed_instance(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_detach_with_invalid_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'detach_volume',
+ fake_detach_volume_invalid_volume)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_with_bad_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {"volume_id": 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_without_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_volume_with_invalid_request(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": None})
+ self.assertEqual(res.status_int, 400)
+
+ @mock.patch('nova.objects.BlockDeviceMapping.is_root',
+ new_callable=mock.PropertyMock)
+ def test_detach_volume_root(self, mock_isroot):
+ url = "/%s/action" % UUID1
+ mock_isroot.return_value = True
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 403)
+
+ def test_attach_volume(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 202)
+
+ def test_attach_volume_to_locked_server(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fakes.fake_actions_to_locked_server)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_attach_volume_disk_bus_and_disk_dev(self):
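+        # No assertion on the response; this only exercises the extra params.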
+ url = "/%s/action" % UUID1
+ self._make_request(url, {"attach": {"volume_id": UUID1,
+ "device": "/dev/vdb",
+ "disk_bus": "ide",
+ "device_type": "cdrom"}})
+
+ def test_attach_volume_with_bad_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {"volume_id": 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_without_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_invalid_request(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": None})
+ self.assertEqual(res.status_int, 400)
+
+    def test_attach_volume_with_non_existed_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_not_found_vol)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_attach_volume_with_non_existed_instance(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_attach_volume_with_invalid_device_path(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_device_path)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1,
+ 'device': 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_instance_invalid_state(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_instance_invalid_state)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_attach_volume_with_invalid_volume(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_volume)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_invalid_request_body(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_volume)
+ res = self._make_request(url, {"attach": None})
+ self.assertEqual(res.status_int, 400)
+
+ def _test_swap(self, uuid=UUID1, body=None):
+ body = body or {'swap_volume_attachment': {'old_volume_id': uuid,
+ 'new_volume_id': UUID2}}
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % UUID1)
+ req.method = 'PUT'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = context.get_admin_context()
+ return self.Controller.swap(req, UUID1, body=body)
+
+ def test_swap_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+        # Verify no exception is raised and the 202 status code is set.
+ self._test_swap()
+ self.assertEqual(202, self.Controller.swap.wsgi_code)
+
+ def test_swap_volume_for_locked_server(self):
+ def fake_swap_volume_for_locked_server(self, context, instance,
+ old_volume, new_volume):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_for_locked_server)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_for_locked_server_new(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fakes.fake_actions_to_locked_server)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_instance_not_found(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
+
+ def test_swap_volume_with_bad_action(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ body = {'swap_volume_attachment_bad_action': None}
+ self.assertRaises(exception.ValidationError, self._test_swap,
+ body=body)
+
+ def test_swap_volume_with_invalid_body(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ body = {'swap_volume_attachment': {'bad_volume_id_body': UUID1,
+ 'new_volume_id': UUID2}}
+ self.assertRaises(exception.ValidationError, self._test_swap,
+ body=body)
+
+ def test_swap_volume_with_invalid_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_invalid_volume)
+ self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
+
+ def test_swap_volume_with_unattached_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_unattached_volume)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
+
+ def test_swap_volume_with_bad_state_instance(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_instance_invalid_state)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_no_attachment(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, UUID3)
+
+ def test_swap_volume_not_found(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py
new file mode 100644
index 0000000000..ee4e9d18b9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py
@@ -0,0 +1,98 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import extension_info
+from nova import exception
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class fake_extension(object):
+ def __init__(self, name, alias, description, version):
+ self.name = name
+ self.alias = alias
+ self.__doc__ = description
+ self.version = version
+
+
+fake_extensions = {
+ 'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description', 1),
+ 'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description', 2),
+ 'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description', 1)
+}
+
+
+def fake_policy_enforce(context, action, target, do_raise=True):
+ return True
+
+
+def fake_policy_enforce_selective(context, action, target, do_raise=True):
+ if action == 'compute_extension:v3:ext1-alias:discoverable':
+ raise exception.Forbidden
+ else:
+ return True
+
+
+class ExtensionInfoTest(test.NoDBTestCase):
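+    """Tests for listing/showing v3 extensions, with policy filtering."""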
+
+ def setUp(self):
+ super(ExtensionInfoTest, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ ext_info.extensions = fake_extensions
+ self.controller = extension_info.ExtensionInfoController(ext_info)
+
+ def test_extension_info_list(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce)
+ req = fakes.HTTPRequestV3.blank('/extensions')
+ res_dict = self.controller.index(req)
+ self.assertEqual(3, len(res_dict['extensions']))
+ for e in res_dict['extensions']:
+ self.assertIn(e['alias'], fake_extensions)
+ self.assertEqual(e['name'], fake_extensions[e['alias']].name)
+ self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
+ self.assertEqual(e['description'],
+ fake_extensions[e['alias']].__doc__)
+ self.assertEqual(e['version'],
+ fake_extensions[e['alias']].version)
+
+ def test_extension_info_show(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce)
+ req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
+ res_dict = self.controller.show(req, 'ext1-alias')
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual(res_dict['extension']['name'],
+ fake_extensions['ext1-alias'].name)
+ self.assertEqual(res_dict['extension']['alias'],
+ fake_extensions['ext1-alias'].alias)
+ self.assertEqual(res_dict['extension']['description'],
+ fake_extensions['ext1-alias'].__doc__)
+ self.assertEqual(res_dict['extension']['version'],
+ fake_extensions['ext1-alias'].version)
+
+ def test_extension_info_list_not_all_discoverable(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
+ req = fakes.HTTPRequestV3.blank('/extensions')
+ res_dict = self.controller.index(req)
+ self.assertEqual(2, len(res_dict['extensions']))
+ for e in res_dict['extensions']:
+ self.assertNotEqual('ext1-alias', e['alias'])
+ self.assertIn(e['alias'], fake_extensions)
+ self.assertEqual(e['name'], fake_extensions[e['alias']].name)
+ self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
+ self.assertEqual(e['description'],
+ fake_extensions[e['alias']].__doc__)
+ self.assertEqual(e['version'],
+ fake_extensions[e['alias']].version)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py
new file mode 100644
index 0000000000..ff5817ba19
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py
@@ -0,0 +1,57 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import lock_server
+from nova import exception
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class LockServerTests(admin_only_action_common.CommonTests):
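+    """Tests for the os-lock-server lock/unlock actions."""
+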
+ def setUp(self):
+ super(LockServerTests, self).setUp()
+ self.controller = lock_server.LockServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(lock_server, 'LockServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-lock-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_lock_unlock(self):
+ self._test_actions(['lock', 'unlock'])
+
+ def test_lock_unlock_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['lock', 'unlock'])
+
+ def test_unlock_not_authorized(self):
+ self.mox.StubOutWithMock(self.compute_api, 'unlock')
+
+ instance = self._stub_instance_get()
+
+ self.compute_api.unlock(self.context, instance).AndRaise(
+ exception.PolicyNotAuthorized(action='unlock'))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'unlock': None})
+ self.assertEqual(403, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py
new file mode 100644
index 0000000000..c735e87fea
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py
@@ -0,0 +1,115 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova.api.openstack.compute.plugins.v3 import migrations
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base
+from nova.openstack.common.fixture import moxstubout
+from nova import test
+
+
+fake_migrations = [
+ {
+ 'id': 1234,
+ 'source_node': 'node1',
+ 'dest_node': 'node2',
+ 'source_compute': 'compute1',
+ 'dest_compute': 'compute2',
+ 'dest_host': '1.2.3.4',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_123',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ },
+ {
+ 'id': 5678,
+ 'source_node': 'node10',
+ 'dest_node': 'node20',
+ 'source_compute': 'compute10',
+ 'dest_compute': 'compute20',
+ 'dest_host': '5.6.7.8',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_456',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 6,
+ 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ }
+]
+
+migrations_obj = base.obj_make_list(
+ 'fake-context',
+ objects.MigrationList(),
+ objects.Migration,
+ fake_migrations)
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class MigrationsTestCase(test.NoDBTestCase):
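+    """Tests for the os-migrations index API."""
+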
+ def setUp(self):
+ """Run before each test."""
+ super(MigrationsTestCase, self).setUp()
+ self.controller = migrations.MigrationsController()
+ self.context = context.get_admin_context()
+ self.req = FakeRequest()
+ self.req.environ['nova.context'] = self.context
+ mox_fixture = self.useFixture(moxstubout.MoxStubout())
+ self.mox = mox_fixture.mox
+
+ def test_index(self):
+ migrations_in_progress = {
+ 'migrations': migrations.output(migrations_obj)}
+
+ for mig in migrations_in_progress['migrations']:
+ self.assertIn('id', mig)
+ self.assertNotIn('deleted', mig)
+ self.assertNotIn('deleted_at', mig)
+
+ filters = {'host': 'host1', 'status': 'migrating',
+ 'cell_name': 'ChildCell'}
+ self.req.GET = filters
+ self.mox.StubOutWithMock(self.controller.compute_api,
+ "get_migrations")
+
+ self.controller.compute_api.get_migrations(
+ self.context, filters).AndReturn(migrations_obj)
+ self.mox.ReplayAll()
+
+ response = self.controller.index(self.req)
+ self.assertEqual(migrations_in_progress, response)
+
+ def test_index_needs_authorization(self):
+ user_context = context.RequestContext(user_id=None,
+ project_id=None,
+ is_admin=False,
+ read_deleted="no",
+ overwrite=False)
+ self.req.environ['nova.context'] = user_context
+
+ self.assertRaises(exception.PolicyNotAuthorized, self.controller.index,
+ self.req)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py
new file mode 100644
index 0000000000..35a559c668
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py
@@ -0,0 +1,547 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import multiple_create
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+class ServersControllerCreateTest(test.TestCase):
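+    """Tests for server create with the os-multiple-create extension."""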
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', 'os-multiple-create',
+ 'osapi_v3')
+ self.no_mult_create_controller = servers.ServersController(
+ extension_info=ext_info)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "security_groups": inst['security_groups'],
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params, update_cells=True,
+ columns_to_join=None):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+
+ def _test_create_extra(self, params, no_image=False,
+ override_controller=None):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ if no_image:
+ server.pop('imageRef', None)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_multiple_create_disabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('min_count', kwargs)
+ self.assertNotIn('max_count', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(
+ params,
+ override_controller=self.no_mult_create_controller)
+
+ def test_multiple_create_with_string_type_min_and_max(self):
+ min_count = '2'
+ max_count = '3'
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsInstance(kwargs['min_count'], int)
+ self.assertIsInstance(kwargs['max_count'], int)
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_enabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_invalid_negative_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: -1,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_negative_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: -1,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_with_blank_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: '',
+ 'name': 'server_test',
+ 'image_ref': image_href,
+ 'flavor_ref': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_with_blank_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: '',
+ 'name': 'server_test',
+ 'image_ref': image_href,
+ 'flavor_ref': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_min_greater_than_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 4,
+ multiple_create.MAX_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_alpha_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_alpha_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_multiple_instances(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_password_len(res["server"])
+
+ def test_create_multiple_instances_pass_disabled(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.flags(enable_instance_password=False)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_password_missing(res["server"])
+
+ def _check_admin_password_len(self, server_dict):
+ """utility function - check server_dict for admin_password length."""
+ self.assertEqual(CONF.password_length,
+ len(server_dict["adminPass"]))
+
+ def _check_admin_password_missing(self, server_dict):
+ """utility function - check server_dict for admin_password absence."""
+ self.assertNotIn("admin_password", server_dict)
+
+ def _create_multiple_instances_resv_id_return(self, resv_id_return):
+ """Test creating multiple instances with asking for
+ reservation_id
+ """
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ multiple_create.RRID_ATTRIBUTE_NAME: resv_id_return
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body)
+ reservation_id = res.obj['reservation_id']
+ self.assertNotEqual(reservation_id, "")
+ self.assertIsNotNone(reservation_id)
+ self.assertTrue(len(reservation_id) > 1)
+
+ def test_create_multiple_instances_with_resv_id_return(self):
+ self._create_multiple_instances_resv_id_return(True)
+
+ def test_create_multiple_instances_with_string_resv_id_return(self):
+ self._create_multiple_instances_resv_id_return("True")
+
+ def test_create_multiple_instances_with_multiple_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested with a list of block device mappings for volumes.
+ """
+ min_count = 2
+ bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
+ {'source_type': 'volume', 'uuid': 'vol-yyyy'}
+ ]
+ params = {
+ block_device_mapping.ATTRIBUTE_NAME: bdm,
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(len(kwargs['block_device_mapping']), 2)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ exc = self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+ self.assertEqual("Cannot attach one or more volumes to multiple "
+ "instances", exc.explanation)
+
+ def test_create_multiple_instances_with_single_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested to boot from a single volume.
+ """
+ min_count = 2
+ bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
+ params = {
+ block_device_mapping.ATTRIBUTE_NAME: bdm,
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
+ 'vol-xxxx')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ exc = self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+ self.assertEqual("Cannot attach one or more volumes to multiple "
+ "instances", exc.explanation)
+
+ def test_create_multiple_instance_with_non_integer_max_count(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_multiple_instance_with_non_integer_min_count(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py
new file mode 100644
index 0000000000..5364fb45b3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py
@@ -0,0 +1,60 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import pause_server
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class PauseServerTests(admin_only_action_common.CommonTests):
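+    """Tests for the os-pause-server pause/unpause actions."""
+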
+ def setUp(self):
+ super(PauseServerTests, self).setUp()
+ self.controller = pause_server.PauseServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(pause_server, 'PauseServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-pause-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_pause_unpause(self):
+ self._test_actions(['pause', 'unpause'])
+
+ def test_actions_raise_on_not_implemented(self):
+ for action in ['pause', 'unpause']:
+ self.mox.StubOutWithMock(self.compute_api, action)
+ self._test_not_implemented_state(action)
+            # Re-stub compute_api.get before testing the next action.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_pause_unpause_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['pause', 'unpause'])
+
+ def test_pause_unpause_with_non_existed_instance_in_compute_api(self):
+ self._test_actions_instance_not_found_in_compute_api(['pause',
+ 'unpause'])
+
+ def test_pause_unpause_raise_conflict_on_invalid_state(self):
+ self._test_actions_raise_conflict_on_invalid_state(['pause',
+ 'unpause'])
+
+ def test_actions_with_locked_instance(self):
+ self._test_actions_with_locked_instance(['pause', 'unpause'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py
new file mode 100644
index 0000000000..6ac6269195
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py
@@ -0,0 +1,236 @@
+# Copyright 2013 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute.plugins.v3 import pci
+from nova.api.openstack import wsgi
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import device
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_pci_device
+
+
+fake_compute_node = {
+ 'pci_stats': [{"count": 3,
+ "vendor_id": "8086",
+ "product_id": "1520",
+ "extra_info": {"phys_function": '[["0x0000", "0x04", '
+ '"0x00", "0x1"]]'}}]}
+
+
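+# Thin wrapper around wsgi.ResponseObject so the tests can hand the PCI
+# controller extensions a response whose .obj dict they can inspect.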
+class FakeResponse(wsgi.ResponseObject):
+ pass
+
+
+class PciServerControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(PciServerControllerTest, self).setUp()
+ self.controller = pci.PciServerController()
+ self.fake_obj = {'server': {'addresses': {},
+ 'id': 'fb08',
+ 'name': 'a3',
+ 'status': 'ACTIVE',
+ 'tenant_id': '9a3af784c',
+ 'user_id': 'e992080ac0',
+ }}
+ self.fake_list = {'servers': [{'addresses': {},
+ 'id': 'fb08',
+ 'name': 'a3',
+ 'status': 'ACTIVE',
+ 'tenant_id': '9a3af784c',
+ 'user_id': 'e992080ac',
+ }]}
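+        # Claim and allocate a fake PCI device against a fake instance so the
+        # controller has something to report in os-pci:pci_devices.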
+ self._create_fake_instance()
+ self._create_fake_pci_device()
+ device.claim(self.pci_device, self.inst)
+ device.allocate(self.pci_device, self.inst)
+
+ def _create_fake_instance(self):
+ self.inst = objects.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = objects.PciDeviceList()
+
+ def _create_fake_pci_device(self):
+ def fake_pci_device_get_by_addr(ctxt, id, addr):
+ return test_pci_device.fake_db_dev
+
+ ctxt = context.get_admin_context()
+ self.stubs.Set(db, 'pci_device_get_by_addr',
+ fake_pci_device_get_by_addr)
+ self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
+
+ def test_show(self):
+ def fake_get_db_instance(id):
+ return self.inst
+
+ resp = FakeResponse(self.fake_obj, '')
+ req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
+ self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
+ self.controller.show(req, resp, '1')
+ self.assertEqual([{'id': 1}],
+ resp.obj['server']['os-pci:pci_devices'])
+
+ def test_detail(self):
+ def fake_get_db_instance(id):
+ return self.inst
+
+ resp = FakeResponse(self.fake_list, '')
+ req = fakes.HTTPRequestV3.blank('/os-pci/detail',
+ use_admin_context=True)
+ self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
+ self.controller.detail(req, resp)
+ self.assertEqual([{'id': 1}],
+ resp.obj['servers'][0]['os-pci:pci_devices'])
+
+
+class PciHypervisorControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(PciHypervisorControllerTest, self).setUp()
+ self.controller = pci.PciHypervisorController()
+ self.fake_objs = dict(hypervisors=[
+ dict(id=1,
+ service=dict(id=1, host="compute1"),
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper1")])
+ self.fake_obj = dict(hypervisor=dict(
+ id=1,
+ service=dict(id=1, host="compute1"),
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper1"))
+
+ def test_show(self):
+ def fake_get_db_compute_node(id):
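+            # Mimic the DB layer, which stores pci_stats as a JSON blob.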
+ fake_compute_node['pci_stats'] = jsonutils.dumps(
+ fake_compute_node['pci_stats'])
+ return fake_compute_node
+
+ req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
+ use_admin_context=True)
+ resp = FakeResponse(self.fake_obj, '')
+ self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
+ self.controller.show(req, resp, '1')
+ self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
+ fake_compute_node['pci_stats'] = jsonutils.loads(
+ fake_compute_node['pci_stats'])
+ self.assertEqual(fake_compute_node['pci_stats'][0],
+ resp.obj['hypervisor']['os-pci:pci_stats'][0])
+
+ def test_detail(self):
+ def fake_get_db_compute_node(id):
+ fake_compute_node['pci_stats'] = jsonutils.dumps(
+ fake_compute_node['pci_stats'])
+ return fake_compute_node
+
+ req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail',
+ use_admin_context=True)
+ resp = FakeResponse(self.fake_objs, '')
+ self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
+ self.controller.detail(req, resp)
+ fake_compute_node['pci_stats'] = jsonutils.loads(
+ fake_compute_node['pci_stats'])
+ self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
+ self.assertEqual(fake_compute_node['pci_stats'][0],
+ resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
+
+
+class PciControllerTest(test.NoDBTestCase):
+    def setUp(self):
+        super(PciControllerTest, self).setUp()
+ self.controller = pci.PciController()
+
+ def test_show(self):
+ def fake_pci_device_get_by_id(context, id):
+ return test_pci_device.fake_db_dev
+
+ self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
+ result = self.controller.show(req, '1')
+ dist = {'pci_device': {'address': 'a',
+ 'compute_node_id': 1,
+ 'dev_id': 'i',
+ 'extra_info': {},
+ 'dev_type': 't',
+ 'id': 1,
+ 'server_uuid': None,
+ 'label': 'l',
+ 'product_id': 'p',
+ 'status': 'available',
+ 'vendor_id': 'v'}}
+ self.assertEqual(dist, result)
+
+ def test_show_error_id(self):
+ def fake_pci_device_get_by_id(context, id):
+ raise exception.PciDeviceNotFoundById(id=id)
+
+ self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ req = fakes.HTTPRequestV3.blank('/os-pci/0', use_admin_context=True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
+
+ def _fake_compute_node_get_all(self, context):
+ return [dict(id=1,
+ service_id=1,
+ cpu_info='cpu_info',
+ disk_available_least=100)]
+
+ def _fake_pci_device_get_all_by_node(self, context, node):
+ return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
+
+ def test_index(self):
+ self.stubs.Set(db, 'compute_node_get_all',
+ self._fake_compute_node_get_all)
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+
+ req = fakes.HTTPRequestV3.blank('/os-pci', use_admin_context=True)
+ result = self.controller.index(req)
+ dist = {'pci_devices': [test_pci_device.fake_db_dev,
+ test_pci_device.fake_db_dev_1]}
+ for i in range(len(result['pci_devices'])):
+ self.assertEqual(dist['pci_devices'][i]['vendor_id'],
+ result['pci_devices'][i]['vendor_id'])
+ self.assertEqual(dist['pci_devices'][i]['id'],
+ result['pci_devices'][i]['id'])
+ self.assertEqual(dist['pci_devices'][i]['status'],
+ result['pci_devices'][i]['status'])
+ self.assertEqual(dist['pci_devices'][i]['address'],
+ result['pci_devices'][i]['address'])
+
+ def test_detail(self):
+ self.stubs.Set(db, 'compute_node_get_all',
+ self._fake_compute_node_get_all)
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+ req = fakes.HTTPRequestV3.blank('/os-pci/detail',
+ use_admin_context=True)
+ result = self.controller.detail(req)
+ dist = {'pci_devices': [test_pci_device.fake_db_dev,
+ test_pci_device.fake_db_dev_1]}
+ for i in range(len(result['pci_devices'])):
+ self.assertEqual(dist['pci_devices'][i]['vendor_id'],
+ result['pci_devices'][i]['vendor_id'])
+ self.assertEqual(dist['pci_devices'][i]['id'],
+ result['pci_devices'][i]['id'])
+ self.assertEqual(dist['pci_devices'][i]['label'],
+ result['pci_devices'][i]['label'])
+ self.assertEqual(dist['pci_devices'][i]['dev_id'],
+ result['pci_devices'][i]['dev_id'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py
new file mode 100644
index 0000000000..0bfe0eb2d4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py
@@ -0,0 +1,1131 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.compute import api as compute_api
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+FAKE_UUID = fakes.FAKE_UUID
+INSTANCE_IDS = {FAKE_UUID: 1}
+
+
+def return_server_not_found(*arg, **kwarg):
+ raise exception.InstanceNotFound(instance_id='42')
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ inst = dict(inst, **values)
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, kwargs, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ return inst
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance, password):
+ self.instance_id = instance['uuid']
+ self.password = password
+
+
+class ServerActionsControllerTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+ def setUp(self):
+ super(ServerActionsControllerTest, self).setUp()
+
+ CONF.set_override('host', 'localhost', group='glance')
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ host='fake_host'))
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ fakes.stub_out_nw_api(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.flags(allow_instance_snapshots=True,
+ enable_instance_password=True)
+ self.uuid = FAKE_UUID
+ self.url = '/servers/%s/action' % self.uuid
+ self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ self.compute_api = self.controller.compute_api
+ self.context = context.RequestContext('fake', 'fake')
+ self.app = fakes.wsgi_app_v21(init_only=('servers',),
+ fake_auth_context=self.context)
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank('/v2/fake' + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
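+        # Record the compute_api.get() call the controller will make and
+        # return a fresh ACTIVE instance object for it.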
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_db_instance(
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance)
+
+ self.compute_api.get(self.context, uuid, want_objects=True,
+ expected_attrs=['pci_devices']).AndReturn(instance)
+ return instance
+
+ def _test_locked_instance(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
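+        # Expect the mapped compute API call to raise InstanceIsLocked and
+        # verify the action returns a 409.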
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+ getattr(compute_api.API, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_actions_with_locked_instance(self):
+ actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
+ 'rebuild']
+
+ method_translations = {'confirmResize': 'confirm_resize',
+ 'revertResize': 'revert_resize'}
+
+ body_map = {'resize': {'flavorRef': '2'},
+ 'reboot': {'type': 'HARD'},
+ 'rebuild': {'imageRef': self.image_uuid,
+ 'adminPass': 'TNc53Dr8s7vw'}}
+
+ args_map = {'resize': (('2'), {}),
+ 'confirmResize': ((), {}),
+ 'reboot': (('HARD',), {}),
+ 'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'), {})}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(compute_api.API, method or action)
+ self._test_locked_instance(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_reboot_hard(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_soft(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_incorrect_type(self):
+ body = dict(reboot=dict(type="NOT_A_TYPE"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_missing_type(self):
+ body = dict(reboot=dict())
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_none(self):
+ body = dict(reboot=dict(type=None))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_not_found(self):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server_not_found)
+
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_reboot,
+ req, str(uuid.uuid4()), body)
+
+ def test_reboot_raises_conflict_on_invalid_state(self):
+ body = dict(reboot=dict(type="HARD"))
+
+ def fake_reboot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING_HARD))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_accepted_minimum(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(len(body['server']['adminPass']),
+ CONF.password_length)
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_instance_with_image_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_uuid,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_instance_with_image_href_uses_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_accepted_minimum_pass_disabled(self):
+        # Run with enable_instance_password disabled to verify the admin
+        # password is missing from the response. See lp bug 921814.
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+        self.assertNotIn('adminPass', body['server'])
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_raises_conflict_on_invalid_state(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ def fake_rebuild(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_accepted_with_metadata(self):
+ metadata = {'new': 'metadata'}
+
+ return_server = fakes.fake_instance_get(metadata=metadata,
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": metadata,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['metadata'], metadata)
+
+ def test_rebuild_accepted_with_bad_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": "stack",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_with_too_large_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": {
+ 256 * "k": "value"
+ }
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild, req,
+ FAKE_UUID, body=body)
+
+ def test_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ "imageId": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_admin_password(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['adminPass'], 'asdf')
+
+ def test_rebuild_admin_password_pass_disabled(self):
+        # Run with enable_instance_password disabled to verify the admin
+        # password is missing from the response. See lp bug 921814.
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "admin_password": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn('adminPass', body['server'])
+
+ def test_rebuild_server_not_found(self):
+ def server_not_found(self, instance_id,
+ columns_to_join=None, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_with_bad_image(self):
+ body = {
+ "rebuild": {
+ "imageRef": "foo",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_when_kernel_not_exists(self):
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_proper_kernel_ram(self):
+ instance_meta = {'kernel_id': None, 'ramdisk_id': None}
+
+ orig_get = compute_api.API.get
+
+ def wrap_get(*args, **kwargs):
+ inst = orig_get(*args, **kwargs)
+ instance_meta['instance'] = inst
+ return inst
+
+ def fake_save(context, **kwargs):
+ instance = instance_meta['instance']
+ for key in instance_meta.keys():
+ if key in instance.obj_what_changed():
+ instance_meta[key] = instance[key]
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ self.stubs.Set(compute_api.API, 'get', wrap_get)
+ self.stubs.Set(objects.Instance, 'save', fake_save)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+ self.assertEqual(instance_meta['kernel_id'], '1')
+ self.assertEqual(instance_meta['ramdisk_id'], '2')
+
+ def _test_rebuild_preserve_ephemeral(self, value=None):
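+        # Expect compute_api.rebuild() to receive preserve_ephemeral only when
+        # the request body sets it.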
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE,
+ host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+ if value is not None:
+ body['rebuild']['preserve_ephemeral'] = value
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ context = req.environ['nova.context']
+
+ self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+ if value is not None:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg(), preserve_ephemeral=value)
+ else:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+
+ def test_rebuild_preserve_ephemeral_true(self):
+ self._test_rebuild_preserve_ephemeral(True)
+
+ def test_rebuild_preserve_ephemeral_false(self):
+ self._test_rebuild_preserve_ephemeral(False)
+
+ def test_rebuild_preserve_ephemeral_default(self):
+ self._test_rebuild_preserve_ephemeral()
+
+ @mock.patch.object(compute_api.API, 'rebuild',
+ side_effect=exception.AutoDiskConfigDisabledByImage(
+ image='dummy'))
+ def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_server(self):
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(compute_api.API, 'resize', resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_resize(req, FAKE_UUID, body=body)
+
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_server_no_flavor(self):
+ body = dict(resize=dict())
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_server_no_flavor_ref(self):
+ body = dict(resize=dict(flavorRef=None))
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_with_image_exceptions(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ self.resize_called = 0
+ image_id = 'fake_image_id'
+
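+        # Each compute-API exception raised by resize should be translated to
+        # the paired webob HTTP error below.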
+ exceptions = [
+ (exception.ImageNotAuthorized(image_id=image_id),
+ webob.exc.HTTPUnauthorized),
+ (exception.ImageNotFound(image_id=image_id),
+ webob.exc.HTTPBadRequest),
+ (exception.Invalid, webob.exc.HTTPBadRequest),
+ (exception.NoValidHost(reason='Bad host'),
+ webob.exc.HTTPBadRequest),
+ (exception.AutoDiskConfigDisabledByImage(image=image_id),
+ webob.exc.HTTPBadRequest),
+ ]
+
+ raised, expected = map(iter, zip(*exceptions))
+
+ def _fake_resize(obj, context, instance, flavor_id):
+ self.resize_called += 1
+ raise raised.next()
+
+ self.stubs.Set(compute_api.API, 'resize', _fake_resize)
+
+ for call_no in range(len(exceptions)):
+ req = fakes.HTTPRequestV3.blank(self.url)
+ next_exception = expected.next()
+ actual = self.assertRaises(next_exception,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+ if (isinstance(exceptions[call_no][0],
+ exception.NoValidHost)):
+ self.assertEqual(actual.explanation,
+ 'No valid host was found. Bad host')
+ elif (isinstance(exceptions[call_no][0],
+ exception.AutoDiskConfigDisabledByImage)):
+ self.assertEqual(actual.explanation,
+ 'Requested image fake_image_id has automatic'
+ ' disk resize disabled.')
+ self.assertEqual(self.resize_called, call_no + 1)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.CannotResizeDisk(reason=''))
+ def test_resize_raises_cannot_resize_disk(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.FlavorNotFound(reason='',
+ flavor_id='fake_id'))
+ def test_resize_raises_flavor_not_found(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_raises_conflict_on_invalid_state(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_confirm_resize_server(self):
+ body = dict(confirmResize=None)
+
+ self.confirm_resize_called = False
+
+ def cr_mock(*args):
+ self.confirm_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.confirm_resize_called, True)
+
+ def test_confirm_resize_migration_not_found(self):
+ body = dict(confirmResize=None)
+
+ def confirm_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'confirm_resize',
+ confirm_resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_raises_conflict_on_invalid_state(self):
+ body = dict(confirmResize=None)
+
+ def fake_confirm_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'confirm_resize',
+ fake_confirm_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_migration_not_found(self):
+ body = dict(revertResize=None)
+
+ def revert_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'revert_resize',
+ revert_resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_server_not_found(self):
+ body = dict(revertResize=None)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+        self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_revert_resize,
+ req, "bad_server_id", body)
+
+ def test_revert_resize_server(self):
+ body = dict(revertResize=None)
+
+ self.revert_resize_called = False
+
+ def revert_mock(*args):
+ self.revert_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_revert_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.revert_resize_called, True)
+
+ def test_revert_resize_raises_conflict_on_invalid_state(self):
+ body = dict(revertResize=None)
+
+ def fake_revert_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'revert_resize',
+ fake_revert_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_create_image(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual(glance.generate_image_url('123'), location)
+
+ def test_create_image_name_too_long(self):
+ long_name = 'a' * 260
+ body = {
+ 'createImage': {
+ 'name': long_name,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image, req,
+ FAKE_UUID, body)
+
+ def _do_test_create_volume_backed_image(self, extra_properties):
+
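+        # Register a fake volume-backed AMI, stub the BDM and volume snapshot
+        # calls, then verify the created image carries the expected bdm_v2
+        # block_device_mapping and properties.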
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+
+ if extra_properties:
+ body['createImage']['metadata'] = extra_properties
+
+ image_service = glance.get_default_image_service()
+
+ bdm = [dict(volume_id=_fake_id('a'),
+ volume_size=1,
+ device_name='vda',
+ delete_on_termination=False)]
+ props = dict(kernel_id=_fake_id('b'),
+ ramdisk_id=_fake_id('c'),
+ root_device_name='/dev/vda',
+ block_device_mapping=bdm)
+ original_image = dict(properties=props,
+ container_format='ami',
+ status='active',
+ is_public=True)
+
+ image_service.create(None, original_image)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref=original_image['id'],
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake')
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ image_id = location.replace(glance.generate_image_url(''), '')
+ image = image_service.show(None, image_id)
+
+ self.assertEqual(image['name'], 'snapshot_of_volume_backed')
+ properties = image['properties']
+ self.assertEqual(properties['kernel_id'], _fake_id('b'))
+ self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
+ self.assertEqual(properties['root_device_name'], '/dev/vda')
+ self.assertEqual(properties['bdm_v2'], True)
+ bdms = properties['block_device_mapping']
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['boot_index'], 0)
+ self.assertEqual(bdms[0]['source_type'], 'snapshot')
+ self.assertEqual(bdms[0]['destination_type'], 'volume')
+ self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
+ for fld in ('connection_info', 'id',
+ 'instance_uuid', 'device_name'):
+ self.assertNotIn(fld, bdms[0])
+ for k in extra_properties.keys():
+ self.assertEqual(properties[k], extra_properties[k])
+
+ def test_create_volume_backed_image_no_metadata(self):
+ self._do_test_create_volume_backed_image({})
+
+ def test_create_volume_backed_image_with_metadata(self):
+ self._do_test_create_volume_backed_image(dict(ImageType='Gold',
+ ImageVersion='2.0'))
+
+ def _test_create_volume_backed_image_with_metadata_from_volume(
+ self, extra_metadata=None):
+
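+        # Like the test above, but the source volume carries
+        # volume_image_metadata that must be copied into the new image's
+        # properties.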
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+ if extra_metadata:
+ body['createImage']['metadata'] = extra_metadata
+
+ image_service = glance.get_default_image_service()
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref='',
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ fake_metadata = {'test_key1': 'test_value1',
+ 'test_key2': 'test_value2'}
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake',
+ volume_image_metadata=fake_metadata)
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+
+ self.mox.ReplayAll()
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+ location = response.headers['Location']
+ image_id = location.replace('http://localhost:9292/images/', '')
+ image = image_service.show(None, image_id)
+
+ properties = image['properties']
+ self.assertEqual(properties['test_key1'], 'test_value1')
+ self.assertEqual(properties['test_key2'], 'test_value2')
+ if extra_metadata:
+ for key, val in extra_metadata.items():
+ self.assertEqual(properties[key], val)
+
+ def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume()
+
+ def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume(
+ extra_metadata={'a': 'b'})
+
+ def test_create_image_snapshots_disabled(self):
+ """Don't permit a snapshot if the allow_instance_snapshots flag is
+ False
+ """
+ self.flags(allow_instance_snapshots=False)
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual(glance.generate_image_url('123'), location)
+
+ def test_create_image_with_too_much_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {},
+ },
+ }
+ for num in range(CONF.quota_metadata_items + 1):
+ body['createImage']['metadata']['foo%i' % num] = "bar"
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_blank_name(self):
+ body = {
+ 'createImage': {
+ 'name': '',
+ }
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_raises_conflict_on_invalid_state(self):
+ def snapshot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ self.stubs.Set(compute_api.API, 'snapshot', snapshot)
+
+ body = {
+ "createImage": {
+ "name": "test_snapshot",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py
new file mode 100644
index 0000000000..e9bd4538a0
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py
@@ -0,0 +1,140 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import server_external_events
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+
+fake_instances = {
+ '00000000-0000-0000-0000-000000000001': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000001', host='host1'),
+ '00000000-0000-0000-0000-000000000002': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000002', host='host1'),
+ '00000000-0000-0000-0000-000000000003': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000003', host='host2'),
+}
+fake_instance_uuids = sorted(fake_instances.keys())
+MISSING_UUID = '00000000-0000-0000-0000-000000000004'
+
+
+@classmethod
+def fake_get_by_uuid(cls, context, uuid):
+ try:
+ return fake_instances[uuid]
+ except KeyError:
+ raise exception.InstanceNotFound(instance_id=uuid)
+
+
+@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
+class ServerExternalEventsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ServerExternalEventsTest, self).setUp()
+ self.api = server_external_events.ServerExternalEventsController()
+ self.context = context.get_admin_context()
+ self.default_body = {
+ 'events': [
+ {'name': 'network-vif-plugged',
+ 'tag': 'foo',
+ 'status': 'completed',
+ 'server_uuid': fake_instance_uuids[0]},
+ {'name': 'network-changed',
+ 'status': 'completed',
+ 'server_uuid': fake_instance_uuids[1]},
+ ]
+ }
+
+ def _create_req(self, body):
+ req = webob.Request.blank('/v2/fake/os-server-external-events')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ req.body = jsonutils.dumps(body)
+ return req
+
+ def _assert_call(self, req, body, expected_uuids, expected_events):
+ with mock.patch.object(self.api.compute_api,
+ 'external_instance_event') as api_method:
+ response = self.api.create(req, body)
+
+ result = response.obj
+ code = response._code
+
+ self.assertEqual(1, api_method.call_count)
+ for inst in api_method.call_args_list[0][0][1]:
+ expected_uuids.remove(inst.uuid)
+ self.assertEqual([], expected_uuids)
+ for event in api_method.call_args_list[0][0][2]:
+ expected_events.remove(event.name)
+ self.assertEqual([], expected_events)
+ return result, code
+
+ def test_create(self):
+ req = self._create_req(self.default_body)
+ result, code = self._assert_call(req, self.default_body,
+ fake_instance_uuids[:2],
+ ['network-vif-plugged',
+ 'network-changed'])
+ self.assertEqual(self.default_body, result)
+ self.assertEqual(200, code)
+
+ def test_create_one_bad_instance(self):
+ body = self.default_body
+ body['events'][1]['server_uuid'] = MISSING_UUID
+ req = self._create_req(body)
+ result, code = self._assert_call(req, body, [fake_instance_uuids[0]],
+ ['network-vif-plugged'])
+ self.assertEqual('failed', result['events'][1]['status'])
+ self.assertEqual(200, result['events'][0]['code'])
+ self.assertEqual(404, result['events'][1]['code'])
+ self.assertEqual(207, code)
+
+ def test_create_no_good_instances(self):
+ body = self.default_body
+ body['events'][0]['server_uuid'] = MISSING_UUID
+ body['events'][1]['server_uuid'] = MISSING_UUID
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.api.create, req, body)
+
+ def test_create_bad_status(self):
+ body = self.default_body
+ body['events'][1]['status'] = 'foo'
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_extra_gorp(self):
+ body = self.default_body
+ body['events'][0]['foobar'] = 'bad stuff'
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_bad_events(self):
+ body = {'events': 'foo'}
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
+
+ def test_create_bad_body(self):
+ body = {'foo': 'bar'}
+ req = self._create_req(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.api.create, req, body)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py
new file mode 100644
index 0000000000..20a8c1e0a1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(
+ compute.api.API, 'get',
+ lambda self, ctxt, *a, **kw:
+ fake_instance.fake_instance_obj(
+ ctxt,
+ system_metadata={},
+ expected_attrs=['system_metadata']))
+ self.password = 'fakepass'
+
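+        # Route the metadata password helpers through self.password so the
+        # tests can observe reads and resets directly.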
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_convert_password(context, password):
+ self.password = password
+ return {}
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'convert_password', fake_convert_password)
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app_v21(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py
new file mode 100644
index 0000000000..6eb92902fe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py
@@ -0,0 +1,3353 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import contextlib
+import copy
+import datetime
+import uuid
+
+import iso8601
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six.moves.urllib.parse as urlparse
+import testtools
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import disk_config
+from nova.api.openstack.compute.plugins.v3 import ips
+from nova.api.openstack.compute.plugins.v3 import keypairs
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack.compute.schemas.v3 import disk_config as \
+ disk_config_schema
+from nova.api.openstack.compute.schemas.v3 import servers as servers_schema
+from nova.api.openstack.compute import views
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import manager
+from nova.network.neutronv2 import api as neutron_api
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import utils as nova_utils
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+FAKE_UUID = fakes.FAKE_UUID
+
+INSTANCE_IDS = {FAKE_UUID: 1}
+FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_servers_empty(context, *args, **kwargs):
+ return []
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, values, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
+ return inst
+
+
+def fake_compute_api(cls, req, id):
+ return True
+
+
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_start_stop_invalid_state(self, context, instance):
+ raise exception.InstanceInvalidState(
+ instance_uuid=instance['uuid'], attr='fake_attr',
+ method='fake_method', state='fake_state')
+
+
+def fake_instance_get_by_uuid_not_found(context, uuid,
+ columns_to_join, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=uuid)
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+
+class Base64ValidationTest(test.TestCase):
+ def setUp(self):
+ super(Base64ValidationTest, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def test_decode_base64(self):
+ value = "A random string"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_binary(self):
+ value = "\x00\x12\x75\x99"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_whitespace(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertEqual(result, value)
+
+ def test_decode_base64_invalid(self):
+ invalid = "A random string"
+ result = self.controller._decode_base64(invalid)
+ self.assertIsNone(result)
+
+ def test_decode_base64_illegal_bytes(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertIsNone(result)
+
+
+class NeutronV2Subclass(neutron_api.API):
+ """Used to ensure that API handles subclasses properly."""
+ pass
+
+
+class ControllerTest(test.TestCase):
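+ """Base class that stubs out the db layer, image service and network
+ info cache, then builds the v3 ServersController and IPsController
+ under test.
+ """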
+
+ def setUp(self):
+ super(ControllerTest, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ self.ips_controller = ips.IPsController()
+ policy.reset()
+ policy.init()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+
+class ServersControllerTest(ControllerTest):
+
+ def setUp(self):
+ super(ServersControllerTest, self).setUp()
+ CONF.set_override('host', 'localhost', group='glance')
+
+ def test_requested_networks_prefix(self):
+ uuid = 'br-00000000-0000-0000-0000-000000000000'
+ requested_networks = [{'uuid': uuid}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertIn((uuid, None), res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(network, None, None, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ addr = '10.0.0.1'
+ requested_networks = [{'uuid': network,
+ 'fixed_ip': addr,
+ 'port': port}]
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller._get_requested_networks,
+ requested_networks)
+
+ def test_requested_networks_neutronv2_disabled_with_port(self):
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller._get_requested_networks,
+ requested_networks)
+
+ def test_requested_networks_api_enabled_with_v2_subclass(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_subclass_with_port(self):
+ cls = ('nova.tests.unit.api.openstack.compute' +
+ '.test_servers.NeutronV2Subclass')
+ self.flags(network_api_class=cls)
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_get_server_by_uuid(self):
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+
+ def test_get_server_joins_pci_devices(self):
+ self.expected_attrs = None
+
+ def fake_get(_self, *args, **kwargs):
+ self.expected_attrs = kwargs['expected_attrs']
+ ctxt = context.RequestContext('fake', 'fake')
+ return fake_instance.fake_instance_obj(ctxt)
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ self.controller.show(req, FAKE_UUID)
+
+ self.assertIn('pci_devices', self.expected_attrs)
+
+ def test_unique_host_id(self):
+ """Create two servers with the same host and different
+ project_ids and check that their hostIds differ.
+ """
+ def return_instance_with_host(self, *args, **kwargs):
+ project_id = str(uuid.uuid4())
+ return fakes.stub_instance(id=1, uuid=FAKE_UUID,
+ project_id=project_id,
+ host='fake_host')
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_instance_with_host)
+ self.stubs.Set(db, 'instance_get',
+ return_instance_with_host)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ server1 = self.controller.show(req, FAKE_UUID)
+ server2 = self.controller.show(req, FAKE_UUID)
+
+ self.assertNotEqual(server1['server']['hostId'],
+ server2['server']['hostId'])
+
+ def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
+ status="ACTIVE", progress=100):
+ return {
+ "server": {
+ "id": uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": progress,
+ "name": "server1",
+ "status": status,
+ "hostId": '',
+ "image": {
+ "id": "10",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
+ ]
+ },
+ "metadata": {
+ "seq": "1",
+ },
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v3/servers/%s" % uuid,
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/servers/%s" % uuid,
+ },
+ ],
+ }
+ }
+
+ def test_get_server_by_id(self):
+ self.flags(use_ipv6=True)
+ image_bookmark = "http://localhost/images/10"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark,
+ status="BUILD",
+ progress=0)
+
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_active_status_by_id(self):
+ image_bookmark = "http://localhost/images/10"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_id_image_ref_by_id(self):
+ image_ref = "10"
+ image_bookmark = "http://localhost/images/10"
+ flavor_id = "1"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, image_ref=image_ref,
+ flavor_id=flavor_id, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_addresses_from_cache(self):
+ pub0 = ('172.19.0.1', '172.19.0.2',)
+ pub1 = ('1.2.3.4',)
+ pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
+ priv0 = ('192.168.0.3', '192.168.0.4',)
+
+ def _ip(ip):
+ return {'address': ip, 'type': 'fixed'}
+
+ nw_cache = [
+ {'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'public',
+ 'subnets': [{'cidr': '172.19.0.0/24',
+ 'ips': [_ip(ip) for ip in pub0]},
+ {'cidr': '1.2.3.0/16',
+ 'ips': [_ip(ip) for ip in pub1]},
+ {'cidr': 'b33f::/64',
+ 'ips': [_ip(ip) for ip in pub2]}]}},
+ {'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {'bridge': 'br1',
+ 'id': 2,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip(ip) for ip in priv0]}]}}]
+
+ return_server = fakes.fake_instance_get(nw_cache=nw_cache)
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % FAKE_UUID)
+ res_dict = self.ips_controller.index(req, FAKE_UUID)
+
+ expected = {
+ 'addresses': {
+ 'private': [
+ {'version': 4, 'addr': '192.168.0.3',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
+ {'version': 4, 'addr': '192.168.0.4',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
+ ],
+ 'public': [
+ {'version': 4, 'addr': '172.19.0.1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 4, 'addr': '172.19.0.2',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 4, 'addr': '1.2.3.4',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ ],
+ },
+ }
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_get_server_addresses_nonexistent_network(self):
+ url = '/v3/servers/%s/ips/network_0' % FAKE_UUID
+ req = fakes.HTTPRequestV3.blank(url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
+ req, FAKE_UUID, 'network_0')
+
+ def test_get_server_addresses_nonexistent_server(self):
+ def fake_instance_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+
+ server_id = str(uuid.uuid4())
+ req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % server_id)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.ips_controller.index, req, server_id)
+
+ def test_get_server_list_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ res_dict = self.controller.index(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_list_with_reservation_id(self):
+ req = fakes.HTTPRequestV3.blank('/servers?reservation_id=foo')
+ res_dict = self.controller.index(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list_with_reservation_id_empty(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list_with_reservation_id_details(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list(self):
+ req = fakes.HTTPRequestV3.blank('/servers')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['servers']), 5)
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+ self.assertIsNone(s.get('image', None))
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v3/servers/%s" % s['id'],
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/servers/%s" % s['id'],
+ },
+ ]
+
+ self.assertEqual(s['links'], expected_links)
+
+ def test_get_servers_with_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=3')
+ res_dict = self.controller.index(req)
+
+ servers = res_dict['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res_dict['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected_params = {'limit': ['3'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected_params))
+
+ def test_get_servers_with_limit_bad_value(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_server_details_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequestV3.blank('/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_details_with_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?limit=3')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_server_details_with_limit_bad_value(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.detail, req)
+
+ def test_get_server_details_with_limit_and_other_params(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail'
+ '?limit=3&blah=2:t')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'blah': ['2:t'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_servers_with_too_big_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=30')
+ res_dict = self.controller.index(req)
+ self.assertNotIn('servers_links', res_dict)
+
+ def test_get_servers_with_bad_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_marker(self):
+ url = '/v3/servers?marker=%s' % fakes.get_fake_uuid(2)
+ req = fakes.HTTPRequestV3.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
+
+ def test_get_servers_with_limit_and_marker(self):
+ url = '/v3/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
+ req = fakes.HTTPRequestV3.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
+
+ def test_get_servers_with_bad_marker(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=2&marker=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_bad_option(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?unknownoption=whee')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_image(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('image', search_opts)
+ self.assertEqual(search_opts['image'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?image=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_tenant_id_filter_converts_to_project_id_for_admin(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'newfake')
+ self.assertFalse(filters.get('tenant_id'))
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers'
+ '?all_tenants=1&tenant_id=newfake',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_tenant_id_filter_no_admin_context(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'fake')
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake')
+ res = self.controller.index(req)
+ self.assertIn('servers', res)
+
+ def test_tenant_id_filter_implies_all_tenants(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ # The project_id filter should be set to the value given in the
+ # request URL rather than the context's project_id, which verifies
+ # that the all_tenants flag was implied by tenant_id.
+ self.assertEqual(filters['project_id'], 'newfake')
+ self.assertFalse(filters.get('tenant_id'))
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake',
+ use_admin_context=True)
+ res = self.controller.index(req)
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_normal(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_one(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_zero(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=0',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_false(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=false',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_invalid(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=xxx',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_admin_restricted_tenant(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'fake')
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_pass_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_fail_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None):
+ self.assertIsNotNone(filters)
+ return [fakes.stub_instance(100)]
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:non_fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
+ def test_get_servers_allows_flavor(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('flavor', search_opts)
+ # flavor is an integer ID, passed through as the string query value
+ self.assertEqual(search_opts['flavor'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_with_bad_flavor(self):
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 0)
+
+ def test_get_server_details_with_bad_flavor(self):
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
+ servers = self.controller.detail(req)['servers']
+
+ self.assertThat(servers, testtools.matchers.HasLength(0))
+
+ def test_get_servers_allows_status(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=active')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_task_status(self):
+ server_uuid = str(uuid.uuid4())
+ task_state = task_states.REBOOTING
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('task_state', search_opts)
+ self.assertEqual([task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING],
+ search_opts['task_state'])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid,
+ task_state=task_state)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=reboot')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_resize_status(self):
+ # The 'resize' status filter maps to a list of vm_states
+ # (ACTIVE and STOPPED).
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'],
+ [vm_states.ACTIVE, vm_states.STOPPED])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=resize')
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_invalid_status(self):
+ # Test getting servers by invalid status.
+ req = fakes.HTTPRequestV3.blank('/servers?status=baloney',
+ use_admin_context=False)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(len(servers), 0)
+
+ def test_get_servers_deleted_status_as_user(self):
+ req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
+ use_admin_context=False)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.detail, req)
+
+ def test_get_servers_deleted_status_as_admin(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], ['deleted'])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
+ use_admin_context=True)
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_name(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('name', search_opts)
+ self.assertEqual(search_opts['name'], 'whee.*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?name=whee.*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_flavor_not_found(self, get_all_mock):
+ get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&flavor=abc')
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(0, len(servers))
+
+ def test_get_servers_allows_changes_since(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('changes-since', search_opts)
+ changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
+ tzinfo=iso8601.iso8601.UTC)
+ self.assertEqual(search_opts['changes-since'], changes_since)
+ self.assertNotIn('deleted', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ params = 'changes-since=2011-01-24T17:08:01Z'
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_changes_since_bad_value(self):
+ params = 'changes-since=asdf'
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
+
+ def test_get_servers_admin_filters_as_user(self):
+ """Test getting servers by admin-only or unknown options when
+ context is not admin. Make sure the admin and unknown options
+ are stripped before they get to compute_api.get_all().
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ self.assertIn('ip', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # Unknown options are stripped for non-admin requests
+ self.assertNotIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequest.blank('/servers?%s' % query_str)
+ res = self.controller.index(req)
+
+ servers = res['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_admin_options_as_admin(self):
+ """Test getting servers by admin-only or unknown options when
+ context is admin. All options should be passed through to get_all().
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # 'ip' is also allowed for users; unknown options pass through only
+ # for admins
+ self.assertIn('ip', search_opts)
+ self.assertIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % query_str,
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_ip(self):
+ """Test getting servers by ip."""
+
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('ip', search_opts)
+ self.assertEqual(search_opts['ip'], '10\..*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?ip=10\..*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_admin_allows_ip6(self):
+ """Test getting servers by ip6 with admin_api enabled and
+ an admin context.
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('ip6', search_opts)
+ self.assertEqual(search_opts['ip6'], 'ffff.*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?ip6=ffff.*',
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_all_server_details(self):
+ expected_flavor = {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": 'http://localhost/flavors/1',
+ },
+ ],
+ }
+ expected_image = {
+ "id": "10",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": 'http://localhost/images/10',
+ },
+ ],
+ }
+ req = fakes.HTTPRequestV3.blank('/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['hostId'], '')
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+ self.assertEqual(s['image'], expected_image)
+ self.assertEqual(s['flavor'], expected_flavor)
+ self.assertEqual(s['status'], 'BUILD')
+ self.assertEqual(s['metadata']['seq'], str(i + 1))
+
+ def test_get_all_server_details_with_host(self):
+ """We want to make sure that if two instances are on the same host,
+ then they return the same hostId. If two instances are on different
+ hosts, they should return different hostIds. In this test,
+ there are 5 instances - 2 on one host and 3 on another.
+ """
+
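+ # host=i % 2 spreads the five instances across two alternating fake hosts.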
+ def return_servers_with_host(context, *args, **kwargs):
+ return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
+ uuid=fakes.get_fake_uuid(i))
+ for i in xrange(5)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_with_host)
+
+ req = fakes.HTTPRequestV3.blank('/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(server_list):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['hostId'], host_ids[i % 2])
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+
+ def test_get_servers_joins_pci_devices(self):
+ self.expected_attrs = None
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.expected_attrs = expected_attrs
+ return []
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers', use_admin_context=True)
+ self.assertIn('servers', self.controller.index(req))
+ self.assertIn('pci_devices', self.expected_attrs)
+
+
+class ServersControllerDeleteTest(ControllerTest):
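+ """Tests for DELETE /servers/<uuid>; db.instance_destroy is stubbed so
+ each test can check whether the delete reached the db layer.
+ """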
+
+ def setUp(self):
+ super(ServersControllerDeleteTest, self).setUp()
+ self.server_delete_called = False
+
+ def instance_destroy_mock(*args, **kwargs):
+ self.server_delete_called = True
+ deleted_at = timeutils.utcnow()
+ return fake_instance.fake_db_instance(deleted_at=deleted_at)
+
+ self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+ def _create_delete_request(self, uuid):
+ fakes.stub_out_instance_quota(self.stubs, 0, 10)
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ req.method = 'DELETE'
+ return req
+
+ def _delete_server_instance(self, uuid=FAKE_UUID):
+ req = self._create_delete_request(uuid)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.controller.delete(req, uuid)
+
+ def test_delete_server_instance(self):
+ self._delete_server_instance()
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_not_found(self):
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self._delete_server_instance,
+ uuid='non-existent-uuid')
+
+ def test_delete_server_instance_while_building(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.controller.delete(req, FAKE_UUID)
+
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_locked_server(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
+ fakes.fake_actions_to_locked_server)
+ self.stubs.Set(compute_api.API, delete_types.DELETE,
+ fakes.fake_actions_to_locked_server)
+
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+ req, FAKE_UUID)
+
+ def test_delete_server_instance_while_resize(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_PREP))
+
+ self.controller.delete(req, FAKE_UUID)
+ # Delete should be allowed in any case, even during resizing,
+ # because the resize may get stuck.
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_if_not_launched(self):
+ self.flags(reclaim_instance_interval=3600)
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ req.method = 'DELETE'
+
+ self.server_delete_called = False
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(launched_at=None))
+
+ def instance_destroy_mock(*args, **kwargs):
+ self.server_delete_called = True
+ deleted_at = timeutils.utcnow()
+ return fake_instance.fake_db_instance(deleted_at=deleted_at)
+ self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+ self.controller.delete(req, FAKE_UUID)
+ # delete() should be called for an instance that has never been active,
+ # even if reclaim_instance_interval has been set.
+ self.assertTrue(self.server_delete_called)
+
+
+class ServersControllerRebuildInstanceTest(ControllerTest):
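+ """Tests for the rebuild action and the start/stop server actions."""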
+
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v3/fake/images/%s' % image_uuid
+
+ def setUp(self):
+ super(ServersControllerRebuildInstanceTest, self).setUp()
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.body = {
+ 'rebuild': {
+ 'name': 'new_name',
+ 'imageRef': self.image_href,
+ 'metadata': {
+ 'open': 'stack',
+ },
+ },
+ }
+ self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def test_rebuild_instance_with_blank_metadata_key(self):
+ self.body['rebuild']['metadata'][''] = 'world'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_with_metadata_key_too_long(self):
+ self.body['rebuild']['metadata'][('a' * 260)] = 'world'
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_with_metadata_value_too_long(self):
+ self.body['rebuild']['metadata']['key1'] = ('a' * 260)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild, self.req,
+ FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_with_metadata_value_not_string(self):
+ self.body['rebuild']['metadata']['key1'] = 1
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild, self.req,
+ FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_fails_when_min_ram_too_small(self):
+ # make min_ram larger than our instance ram size
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="4096", min_disk="10")
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_fails_when_min_disk_too_small(self):
+ # make min_disk larger than our instance disk size
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="128", min_disk="100000")
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild, self.req,
+ FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_image_too_large(self):
+ # make image size larger than our instance disk size
+ size = str(1000 * (1024 ** 3))
+
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', size=size)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_name_all_blank(self):
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True, status='active')
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+ self.body['rebuild']['name'] = ' '
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_with_deleted_image(self):
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='DELETED')
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_onset_file_limit_over_quota(self):
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True, status='active')
+
+ with contextlib.nested(
+ mock.patch.object(fake._FakeImageService, 'show',
+ side_effect=fake_get_image),
+ mock.patch.object(self.controller.compute_api, 'rebuild',
+ side_effect=exception.OnsetFileLimitExceeded)
+ ) as (
+ show_mock, rebuild_mock
+ ):
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_start(self):
+ self.mox.StubOutWithMock(compute_api.API, 'start')
+ compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ self.controller._start_server(req, FAKE_UUID, body)
+
+ def test_start_policy_failed(self):
+ rules = {
+ "compute:v3:servers:start":
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._start_server,
+ req, FAKE_UUID, body)
+ self.assertIn("compute:v3:servers:start", exc.format_message())
+
+ def test_start_not_ready(self):
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, FAKE_UUID, body)
+
+ def test_start_locked_server(self):
+ self.stubs.Set(compute_api.API, 'start',
+ fakes.fake_actions_to_locked_server)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, FAKE_UUID, body)
+
+ def test_start_invalid(self):
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, FAKE_UUID, body)
+
+ def test_stop(self):
+ self.mox.StubOutWithMock(compute_api.API, 'stop')
+ compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(stop="")
+ self.controller._stop_server(req, FAKE_UUID, body)
+
+ def test_stop_policy_failed(self):
+ rules = {
+ "compute:v3:servers:stop":
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(stop='')
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._stop_server,
+ req, FAKE_UUID, body)
+ self.assertIn("compute:v3:servers:stop", exc.format_message())
+
+ def test_stop_not_ready(self):
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, FAKE_UUID, body)
+
+ def test_stop_locked_server(self):
+ self.stubs.Set(compute_api.API, 'stop',
+ fakes.fake_actions_to_locked_server)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, FAKE_UUID, body)
+
+ def test_stop_invalid_state(self):
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, FAKE_UUID, body)
+
+ def test_start_with_bogus_id(self):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid_not_found)
+ req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_stop_with_bogus_id(self):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid_not_found)
+ req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._stop_server, req, 'test_inst', body)
+
+
+class ServersControllerUpdateTest(ControllerTest):
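+ """Tests for updating a server via PUT /servers/<uuid>."""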
+
+ def _get_request(self, body=None, options=None):
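+ # Builds a PUT request; 'options' optionally stubs db.instance_get.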
+ if options:
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(**options))
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ req.body = jsonutils.dumps(body)
+ return req
+
+ def test_update_server_all_attributes(self):
+ body = {'server': {
+ 'name': 'server_test',
+ }}
+ req = self._get_request(body, {'name': 'server_test'})
+ res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_name(self):
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_name_too_long(self):
+ body = {'server': {'name': 'x' * 256}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+ def test_update_server_name_all_blank_spaces(self):
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(name='server_test'))
+ req = fakes.HTTPRequest.blank('/v3/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'server': {'name': ' ' * 64}}
+ req.body = jsonutils.dumps(body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+ def test_update_server_admin_password_ignored(self):
+ inst_dict = dict(name='server_test', admin_password='bacon')
+ body = dict(server=inst_dict)
+
+ def server_update(context, id, params):
+ filtered_dict = {
+ 'display_name': 'server_test',
+ }
+ self.assertEqual(params, filtered_dict)
+ filtered_dict['uuid'] = id
+ return filtered_dict
+
+ self.stubs.Set(db, 'instance_update', server_update)
+ # FIXME (comstud)
+ # self.stubs.Set(db, 'instance_get',
+ # return_server_with_attributes(name='server_test'))
+
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = jsonutils.dumps(body)
+ res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_not_found(self):
+ def fake_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+ def test_update_server_not_found_on_update(self):
+ def fake_update(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+ def test_update_server_policy_fail(self):
+ rule = {'compute:update': common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.update, req, FAKE_UUID, body=body)
+
+
+class ServerStatusTest(test.TestCase):
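+ """Checks how (vm_state, task_state) pairs map to the API 'status' field."""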
+
+ def setUp(self):
+ super(ServerStatusTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def _get_with_state(self, vm_state, task_state=None):
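+ # Stubs the instance into the given states and returns the show() response.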
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_state,
+ task_state=task_state))
+
+ request = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ return self.controller.show(request, FAKE_UUID)
+
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_reboot_hard(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING_HARD)
+ self.assertEqual(response['server']['status'], 'HARD_REBOOT')
+
+ def test_reboot_resize_policy_fail(self):
+ def fake_get_server(context, req, id):
+ return fakes.stub_instance(id)
+
+ self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+ rule = {'compute:reboot':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_reboot, req, '1234',
+ {'reboot': {'type': 'HARD'}})
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_PREP)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_confirm_resize_policy_fail(self):
+ def fake_get_server(context, req, id):
+ return fakes.stub_instance(id)
+
+ self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+ rule = {'compute:confirm_resize':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_confirm_resize, req, '1234', {})
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.RESIZED, None)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_revert_resize(self):
+ response = self._get_with_state(vm_states.RESIZED,
+ task_states.RESIZE_REVERTING)
+ self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
+
+ def test_revert_resize_policy_fail(self):
+ def fake_get_server(context, req, id):
+ return fakes.stub_instance(id)
+
+ self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+ rule = {'compute:revert_resize':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_revert_resize, req, '1234', {})
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'SHUTOFF')
+
+
+class ServersControllerCreateTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ fakes.stub_out_nw_api(self.stubs)
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "config_drive": None,
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params, update_cells=True):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return inst
+
+ def server_update_and_get_original(
+ context, instance_uuid, params, update_cells=False,
+ columns_to_join=None):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update_and_get_original)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
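+ # NOTE: the stubs above redirect the db-layer create/get/update calls
+ # into the in-memory instance caches, so controller.create() can build
+ # and return a server without touching a real database.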
+ self.body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': self.image_uuid,
+ 'flavorRef': self.flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ self.bdm = [{'delete_on_termination': 1,
+ 'device_name': 123,
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+
+ self.req = fakes.HTTPRequest.blank('/fake/servers')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def _check_admin_password_len(self, server_dict):
+ """utility function - check server_dict for admin_password length."""
+ self.assertEqual(CONF.password_length,
+ len(server_dict["adminPass"]))
+
+ def _check_admin_password_missing(self, server_dict):
+ """utility function - check server_dict for admin_password absence."""
+ self.assertNotIn("adminPass", server_dict)
+
+ def _test_create_instance(self, flavor=2):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ self.body['server']['imageRef'] = image_uuid
+ self.body['server']['flavorRef'] = flavor
+ self.req.body = jsonutils.dumps(self.body)
+ server = self.controller.create(self.req, body=self.body).obj['server']
+ self._check_admin_password_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_private_flavor(self):
+ values = {
+ 'name': 'fake_name',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 10,
+ 'ephemeral_gb': 10,
+ 'flavorid': '1324',
+ 'swap': 0,
+ 'rxtx_factor': 0.5,
+ 'vcpu_weight': 1,
+ 'disabled': False,
+ 'is_public': False,
+ }
+ db.flavor_create(context.get_admin_context(), values)
+ self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
+ flavor=1324)
+
+ def test_create_server_bad_image_href(self):
+ image_href = 1
+ self.body['server']['min_count'] = 1
+ self.body['server']['imageRef'] = image_href,
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ self.req, body=self.body)
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-networks extension tests
+ # def test_create_server_with_invalid_networks_parameter(self):
+ # self.ext_mgr.extensions = {'os-networks': 'fake'}
+ # image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # flavor_ref = 'http://localhost/123/flavors/3'
+ # body = {
+ # 'server': {
+ # 'name': 'server_test',
+ # 'imageRef': image_href,
+ # 'flavorRef': flavor_ref,
+ # 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
+ # }
+ # }
+ # req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ # req.method = 'POST'
+ # req.body = jsonutils.dumps(body)
+ # req.headers["content-type"] = "application/json"
+ # self.assertRaises(webob.exc.HTTPBadRequest,
+ # self.controller.create,
+ # req,
+ # body)
+
+ def test_create_server_with_deleted_image(self):
+ # Get the fake image service so we can set the status to deleted
+ (image_service, image_id) = glance.get_remote_image_service(
+ context, '')
+ image_service.update(context, self.image_uuid, {'status': 'DELETED'})
+ self.addCleanup(image_service.update, context, self.image_uuid,
+ {'status': 'active'})
+
+ self.body['server']['flavorRef'] = 2
+ self.req.body = jsonutils.dumps(self.body)
+ with testtools.ExpectedException(
+ webob.exc.HTTPBadRequest,
+ 'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
+ self.controller.create(self.req, body=self.body)
+
+ def test_create_server_image_too_large(self):
+ # Get the fake image service so we can set the status to deleted
+ (image_service, image_id) = glance.get_remote_image_service(
+ context, self.image_uuid)
+
+ image = image_service.show(context, image_id)
+
+ orig_size = image['size']
+ new_size = str(1000 * (1024 ** 3))
+ image_service.update(context, self.image_uuid, {'size': new_size})
+
+ self.addCleanup(image_service.update, context, self.image_uuid,
+ {'size': orig_size})
+
+ self.body['server']['flavorRef'] = 2
+ self.req.body = jsonutils.dumps(self.body)
+
+ with testtools.ExpectedException(
+ webob.exc.HTTPBadRequest,
+ "Flavor's disk is too small for requested image."):
+ self.controller.create(self.req, body=self.body)
+
+ def test_create_instance_image_ref_is_bookmark(self):
+ image_href = 'http://localhost/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_image_ref_is_invalid(self):
+ image_uuid = 'this_is_not_a_valid_uuid'
+ image_href = 'http://localhost/images/%s' % image_uuid
+ flavor_ref = 'http://localhost/flavors/3'
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, body=self.body)
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance()
+
+ def _test_create_extra(self, params, no_image=False):
+ self.body['server']['flavorRef'] = 2
+ if no_image:
+ self.body['server'].pop('imageRef', None)
+ self.body['server'].update(params)
+ self.req.body = jsonutils.dumps(self.body)
+ self.req.headers["content-type"] = "application/json"
+ self.controller.create(self.req, body=self.body).obj['server']
+
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-keypairs extension tests
+ # def test_create_instance_with_keypairs_enabled(self):
+ # self.ext_mgr.extensions = {'os-keypairs': 'fake'}
+ # key_name = 'green'
+ #
+ # params = {'key_name': key_name}
+ # old_create = compute_api.API.create
+ #
+ # # NOTE(sdague): key pair goes back to the database,
+ # # so we need to stub it out for tests
+ # def key_pair_get(context, user_id, name):
+ # return {'public_key': 'FAKE_KEY',
+ # 'fingerprint': 'FAKE_FINGERPRINT',
+ # 'name': name}
+ #
+ # def create(*args, **kwargs):
+ # self.assertEqual(kwargs['key_name'], key_name)
+ # return old_create(*args, **kwargs)
+ #
+ # self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ # self.stubs.Set(compute_api.API, 'create', create)
+ # self._test_create_extra(params)
+ #
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-networks extension tests
+ # def test_create_instance_with_networks_enabled(self):
+ # self.ext_mgr.extensions = {'os-networks': 'fake'}
+ # net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # requested_networks = [{'uuid': net_uuid}]
+ # params = {'networks': requested_networks}
+ # old_create = compute_api.API.create
+
+ # def create(*args, **kwargs):
+ # result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
+ # self.assertEqual(kwargs['requested_networks'], result)
+ # return old_create(*args, **kwargs)
+
+ # self.stubs.Set(compute_api.API, 'create', create)
+ # self._test_create_extra(params)
+
+ def test_create_instance_with_port_with_no_fixed_ips(self):
+ port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port_id}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortRequiresFixedIP(port_id=port_id)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_raise_user_data_too_large(self, mock_create):
+ mock_create.side_effect = exception.InstanceUserDataTooLarge(
+ maxsize=1, length=2)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, body=self.body)
+
+ def test_create_instance_with_network_with_no_subnet(self):
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkRequiresSubnet(network_uuid=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_non_unique_secgroup_name(self):
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks,
+ 'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NoUniqueMatch("No Unique match found for ...")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_networks_disabled_neutronv2(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
+ None, None)]
+ self.assertEqual(result, kwargs['requested_networks'].as_tuples())
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_networks_disabled(self):
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['requested_networks'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_pass_disabled(self):
+ # Test with admin passwords disabled. See lp bug 921814.
+ self.flags(enable_instance_password=False)
+
+ # supply the image as a full local image href
+ image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self._check_admin_password_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_name_too_long(self):
+ # supply the image as a full local image href
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['name'] = 'X' * 256
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=self.body)
+
+ def test_create_instance_name_all_blank_spaces(self):
+ # supply the image as a full local image href
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v3/images/%s' % image_uuid
+ flavor_ref = 'http://localhost/flavors/3'
+ body = {
+ 'server': {
+ 'name': ' ' * 64,
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v3/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance(self):
+ # supply the image as a full local image href
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self._check_admin_password_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_extension_create_exception(self):
+ def fake_keypair_server_create(self, server_dict,
+ create_kwargs):
+ raise KeyError
+
+ self.stubs.Set(keypairs.Keypairs, 'server_create',
+ fake_keypair_server_create)
+ # proper local hrefs must start with 'http://localhost/v3/'
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v3/images/%s' % image_uuid
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(webob.exc.HTTPInternalServerError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ # supply the image as a full local image href
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self._check_admin_password_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_too_much_metadata(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata']['vote'] = 'fiddletown'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_key_too_long(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = {('a' * 260): '12345'}
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_value_too_long(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = {'key1': ('a' * 260)}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_key_blank(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = {'': 'abcd'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_not_dict(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = 'string'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_key_not_string(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = {1: 'test'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_metadata_value_not_string(self):
+ self.flags(quota_metadata_items=1)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['metadata'] = {'test': ['a', 'list']}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_user_data_malformed_bad_request(self):
+ params = {'user_data': 'u1234'}
+ self.assertRaises(exception.ValidationError,
+ self._test_create_extra, params)
+
+ def test_create_instance_invalid_key_name(self):
+ image_href = 'http://localhost/v2/images/2'
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['key_name'] = 'nonexistentkey'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_valid_key_name(self):
+ self.body['server']['key_name'] = 'key'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_password_len(res["server"])
+
+ def test_create_instance_invalid_flavor_href(self):
+ image_href = 'http://localhost/v2/images/2'
+ flavor_ref = 'http://localhost/v2/flavors/asdf'
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_invalid_flavor_id_int(self):
+ image_href = 'http://localhost/v2/images/2'
+ flavor_ref = -1
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_bad_flavor_href(self):
+ image_href = 'http://localhost/v2/images/2'
+ flavor_ref = 'http://localhost/v2/flavors/17'
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_bad_href(self):
+ image_href = 'asdf'
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_local_href(self):
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_admin_password(self):
+ self.body['server']['flavorRef'] = 3
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self.assertEqual(server['adminPass'],
+ self.body['server']['adminPass'])
+
+ def test_create_instance_admin_password_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.body['server']['flavorRef'] = 3
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ self.assertIn('server', res)
+ self.assertNotIn('adminPass', res['server'])
+
+ def test_create_instance_admin_password_empty(self):
+ self.body['server']['flavorRef'] = 3
+ self.body['server']['adminPass'] = ''
+ self.req.body = jsonutils.dumps(self.body)
+
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, body=self.body)
+
+ def test_create_location(self):
+ selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+ self.req.body = jsonutils.dumps(self.body)
+ robj = self.controller.create(self.req, body=self.body)
+
+ self.assertEqual(robj['Location'], selfhref)
+
+ def _do_test_create_instance_above_quota(self, resource, allowed, quota,
+ expected_msg):
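+ # Stub the quota engine so that only 'allowed' of the 'quota' limit
+ # remains for 'resource'; create() is then expected to fail with
+ # HTTPForbidden carrying the exact over-quota message.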
+ fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
+ self.body['server']['flavorRef'] = 3
+ self.req.body = jsonutils.dumps(self.body)
+ try:
+ self.controller.create(self.req, body=self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_instances(self):
+ msg = _('Quota exceeded for instances: Requested 1, but'
+ ' already used 10 of 10 instances')
+ self._do_test_create_instance_above_quota('instances', 0, 10, msg)
+
+ def test_create_instance_above_quota_ram(self):
+ msg = _('Quota exceeded for ram: Requested 4096, but'
+ ' already used 8192 of 10240 ram')
+ self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
+
+ def test_create_instance_above_quota_cores(self):
+ msg = _('Quota exceeded for cores: Requested 2, but'
+ ' already used 9 of 10 cores')
+ self._do_test_create_instance_above_quota('cores', 1, 10, msg)
+
+ def test_create_instance_above_quota_server_group_members(self):
+ ctxt = context.get_admin_context()
+ fake_group = objects.InstanceGroup(ctxt)
+ fake_group.create()
+
+ def fake_count(context, name, group, user_id):
+ self.assertEqual(name, "server_group_members")
+ self.assertEqual(group.uuid, fake_group.uuid)
+ self.assertEqual(user_id,
+ self.req.environ['nova.context'].user_id)
+ return 10
+
+ def fake_limit_check(context, **kwargs):
+ if 'server_group_members' in kwargs:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
+ self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
+ self.req.body = jsonutils.dumps(self.body)
+ expected_msg = "Quota exceeded, too many servers in group"
+
+ try:
+ self.controller.create(self.req, body=self.body).obj
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_server_groups(self):
+
+ def fake_reserve(context, **deltas):
+ if 'server_groups' in deltas:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.body['os:scheduler_hints'] = {'group': 'fake_group'}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many server groups."
+
+ try:
+ self.controller.create(self.req, body=self.body).obj
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_with_neutronv2_port_in_use(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortInUse(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_public_network_non_admin(self, mock_create):
+ public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ params = {'networks': [{'uuid': public_network_uuid}]}
+ self.req.body = jsonutils.dumps(self.body)
+ mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
+ network_uuid=public_network_uuid)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_multiple_instance_with_specified_ip_neutronv2(self,
+ _api_mock):
+ _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
+ reason="")
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ address = '10.0.0.1'
+ requested_networks = [{'uuid': network, 'fixed_ip': address,
+ 'port': port}]
+ params = {'networks': requested_networks}
+ self.body['server']['max_count'] = 2
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_multiple_instance_with_neutronv2_port(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+ self.body['server']['max_count'] = 2
+
+ def fake_create(*args, **kwargs):
+ msg = _("Unable to launch multiple instances with"
+ " a single configured port ID. Please launch your"
+ " instance one by one with different ports.")
+ raise exception.MultiplePortsNotApplicable(reason=msg)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_neutronv2_not_found_network(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkNotFound(network_id=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_neutronv2_port_not_found(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortNotFound(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_network_ambiguous(self, mock_create):
+ mock_create.side_effect = exception.NetworkAmbiguous()
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, {})
+
+ @mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InstanceExists(
+ name='instance-name'))
+ def test_create_instance_raise_instance_exists(self, mock_create):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create,
+ self.req, body=self.body)
+
+
+class ServersControllerCreateTestWithMock(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestWithMock, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ self.body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': self.image_uuid,
+ 'flavorRef': self.flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ self.req = fakes.HTTPRequest.blank('/fake/servers')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def _test_create_extra(self, params, no_image=False):
+ self.body['server']['flavorRef'] = 2
+ if no_image:
+ self.body['server'].pop('imageRef', None)
+ self.body['server'].update(params)
+ self.req.body = jsonutils.dumps(self.body)
+ self.req.headers["content-type"] = "application/json"
+ self.controller.create(self.req, body=self.body).obj['server']
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
+ create_mock):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.2.3'
+ requested_networks = [{'uuid': network, 'fixed_ip': address}]
+ params = {'networks': requested_networks}
+ create_mock.side_effect = exception.FixedIpAlreadyInUse(
+ address=address,
+ instance_uuid=network)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+ self.assertEqual(1, len(create_mock.call_args_list))
+
+ @mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InvalidVolume(reason='error'))
+ def test_create_instance_with_invalid_volume_error(self, create_mock):
+ # Tests that InvalidVolume is translated to a 400 error.
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {})
+
+
+class ServersViewBuilderTest(test.TestCase):
+
+ def setUp(self):
+ super(ServersViewBuilderTest, self).setUp()
+ CONF.set_override('host', 'localhost', group='glance')
+ self.flags(use_ipv6=True)
+ db_inst = fakes.stub_instance(
+ id=1,
+ image_ref="5",
+ uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
+ display_name="test_server",
+ include_fake_metadata=False)
+
+ privates = ['172.19.0.1']
+ publics = ['192.168.0.3']
+ public6s = ['b33f::fdee:ddff:fecc:bbaa']
+
+ def nw_info(*args, **kwargs):
+ return [(None, {'label': 'public',
+ 'ips': [dict(ip=ip) for ip in publics],
+ 'ip6s': [dict(ip=ip) for ip in public6s]}),
+ (None, {'label': 'private',
+ 'ips': [dict(ip=ip) for ip in privates]})]
+
+ def floaters(*args, **kwargs):
+ return []
+
+ fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+ fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
+ floaters)
+
+ self.uuid = db_inst['uuid']
+ self.view_builder = views.servers.ViewBuilderV3()
+ self.request = fakes.HTTPRequestV3.blank("")
+ self.request.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_instance.fake_instance_obj(
+ self.request.context,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
+ **db_inst)
+
+ def test_get_flavor_valid_instance_type(self):
+ flavor_bookmark = "http://localhost/flavors/1"
+ expected = {"id": "1",
+ "links": [{"rel": "bookmark",
+ "href": flavor_bookmark}]}
+ result = self.view_builder._get_flavor(self.request, self.instance)
+ self.assertEqual(result, expected)
+
+ def test_build_server(self):
+ self_link = "http://localhost/v3/servers/%s" % self.uuid
+ bookmark_link = "http://localhost/servers/%s" % self.uuid
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "name": "test_server",
+ "links": [
+ {
+ "rel": "self",
+ "href": self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark_link,
+ },
+ ],
+ }
+ }
+
+ output = self.view_builder.basic(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_with_project_id(self):
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "name": "test_server",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v3/servers/%s" %
+ self.uuid,
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/servers/%s" % self.uuid,
+ },
+ ],
+ }
+ }
+
+ output = self.view_builder.basic(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_detail(self):
+ image_bookmark = "http://localhost/images/5"
+ flavor_bookmark = "http://localhost/flavors/1"
+ self_link = "http://localhost/v3/servers/%s" % self.uuid
+ bookmark_link = "http://localhost/servers/%s" % self.uuid
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "hostId": '',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
+ ]
+ },
+ "metadata": {},
+ "links": [
+ {
+ "rel": "self",
+ "href": self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark_link,
+ },
+ ],
+ }
+ }
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_detail_with_fault(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context, self.uuid)
+
+ image_bookmark = "http://localhost/images/5"
+ flavor_bookmark = "http://localhost/flavors/1"
+ self_link = "http://localhost/v3/servers/%s" % self.uuid
+ bookmark_link = "http://localhost/servers/%s" % self.uuid
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "name": "test_server",
+ "status": "ERROR",
+ "hostId": '',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
+ ]
+ },
+ "metadata": {},
+ "links": [
+ {
+ "rel": "self",
+ "href": self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark_link,
+ },
+ ],
+ "fault": {
+ "code": 404,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "HTTPNotFound",
+ "details": "Stock details for test",
+ },
+ }
+ }
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_detail_with_fault_that_has_been_deleted(self):
+ self.instance['deleted'] = 1
+ self.instance['vm_state'] = vm_states.ERROR
+ fault = fake_instance.fake_fault_obj(self.request.context,
+ self.uuid, code=500,
+ message="No valid host was found")
+ self.instance['fault'] = fault
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "No valid host was found"}
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ # Regardless of vm_state, deleted servers should be DELETED
+ self.assertEqual("DELETED", output['server']['status'])
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_no_details_not_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error"}
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error",
+ 'details': 'Stock details for test'}
+
+ self.request.environ['nova.context'].is_admin = True
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_no_details_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error',
+ details='')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error"}
+
+ self.request.environ['nova.context'].is_admin = True
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_but_active(self):
+ self.instance['vm_state'] = vm_states.ACTIVE
+ self.instance['progress'] = 100
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context, self.uuid)
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertNotIn('fault', output['server'])
+
+ def test_build_server_detail_active_status(self):
+ # set the power state of the instance to running
+ self.instance['vm_state'] = vm_states.ACTIVE
+ self.instance['progress'] = 100
+ image_bookmark = "http://localhost/images/5"
+ flavor_bookmark = "http://localhost/flavors/1"
+ self_link = "http://localhost/v3/servers/%s" % self.uuid
+ bookmark_link = "http://localhost/servers/%s" % self.uuid
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": 100,
+ "name": "test_server",
+ "status": "ACTIVE",
+ "hostId": '',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
+ ]
+ },
+ "metadata": {},
+ "links": [
+ {
+ "rel": "self",
+ "href": self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark_link,
+ },
+ ],
+ }
+ }
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+ def test_build_server_detail_with_metadata(self):
+
+ metadata = []
+ metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
+ metadata = nova_utils.metadata_to_dict(metadata)
+ self.instance['metadata'] = metadata
+
+ image_bookmark = "http://localhost/images/5"
+ flavor_bookmark = "http://localhost/flavors/1"
+ self_link = "http://localhost/v3/servers/%s" % self.uuid
+ bookmark_link = "http://localhost/servers/%s" % self.uuid
+ expected_server = {
+ "server": {
+ "id": self.uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "hostId": '',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ ]
+ },
+ "metadata": {"Open": "Stack"},
+ "links": [
+ {
+ "rel": "self",
+ "href": self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": bookmark_link,
+ },
+ ],
+ }
+ }
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output, matchers.DictMatches(expected_server))
+
+
+class ServersAllExtensionsTestCase(test.TestCase):
+ """Servers tests using default API router with all extensions enabled.
+
+ The intent here is to catch cases where extensions end up throwing
+ an exception because of a malformed request before the core API
+ gets a chance to validate the request and return a 400 response.
+
+ For example, AccessIPsController extends servers.Controller::
+
+ | @wsgi.extends
+ | def create(self, req, resp_obj, body):
+ | context = req.environ['nova.context']
+ | if authorize(context) and 'server' in resp_obj.obj:
+ | resp_obj.attach(xml=AccessIPTemplate())
+ | server = resp_obj.obj['server']
+ | self._extend_server(req, server)
+
+ we want to ensure that the extension isn't barfing on an invalid
+ body.
+ """
+
+ def setUp(self):
+ super(ServersAllExtensionsTestCase, self).setUp()
+ self.app = compute.APIRouterV3()
+
+ def test_create_missing_server(self):
+ # Test create with malformed body.
+
+ def fake_create(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_missing_server(self):
+ # Test update with malformed body.
+
+ def fake_update(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ req = fakes.HTTPRequestV3.blank('/servers/1')
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+
+class ServersInvalidRequestTestCase(test.TestCase):
+ """Tests of places we throw 400 Bad Request from."""
+
+ def setUp(self):
+ super(ServersInvalidRequestTestCase, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def _invalid_server_create(self, body):
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_server_no_body(self):
+ self._invalid_server_create(body=None)
+
+ def test_create_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._invalid_server_create(body=body)
+
+ def test_create_server_malformed_entity(self):
+ body = {'server': 'string'}
+ self._invalid_server_create(body=body)
+
+ def _unprocessable_server_update(self, body):
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, FAKE_UUID, body=body)
+
+ def test_update_server_no_body(self):
+ self._invalid_server_create(body=None)
+
+ def test_update_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._invalid_server_create(body=body)
+
+ def test_create_update_malformed_entity(self):
+ body = {'server': 'string'}
+ self._invalid_server_create(body=body)
+
+
+class FakeExt(extensions.V3APIExtensionBase):
+ name = "DiskConfig"
+ alias = 'os-disk-config'
+ version = 1
+
+ def fake_extension_point(self, *args, **kwargs):
+ pass
+
+ def get_controller_extensions(self):
+ return []
+
+ def get_resources(self):
+ return []
+
+
+class TestServersExtensionPoint(test.NoDBTestCase):
+ def setUp(self):
+ super(TestServersExtensionPoint, self).setUp()
+ CONF.set_override('extensions_whitelist', ['os-disk-config'],
+ 'osapi_v3')
+ self.stubs.Set(disk_config, 'DiskConfig', FakeExt)
+
+ def _test_load_extension_point(self, name):
+ setattr(FakeExt, 'server_%s' % name,
+ FakeExt.fake_extension_point)
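+ # Attaching a server_<name> hook to the fake extension should make the
+ # loaded controller expose it through the matching extension manager,
+ # which is what the assertion below verifies.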
+ ext_info = plugins.LoadedExtensionInfo()
+ controller = servers.ServersController(extension_info=ext_info)
+ self.assertEqual(
+ 'os-disk-config',
+ list(getattr(controller,
+ '%s_extension_manager' % name))[0].obj.alias)
+ delattr(FakeExt, 'server_%s' % name)
+
+ def test_load_update_extension_point(self):
+ self._test_load_extension_point('update')
+
+ def test_load_rebuild_extension_point(self):
+ self._test_load_extension_point('rebuild')
+
+ def test_load_create_extension_point(self):
+ self._test_load_extension_point('create')
+
+ def test_load_resize_extension_point(self):
+ self._test_load_extension_point('resize')
+
+
+class TestServersExtensionSchema(test.NoDBTestCase):
+ def setUp(self):
+ super(TestServersExtensionSchema, self).setUp()
+ CONF.set_override('extensions_whitelist', ['disk_config'], 'osapi_v3')
+
+ def _test_load_extension_schema(self, name):
+ setattr(FakeExt, 'get_server_%s_schema' % name,
+ FakeExt.fake_extension_point)
+ ext_info = plugins.LoadedExtensionInfo()
+ controller = servers.ServersController(extension_info=ext_info)
+ self.assertTrue(hasattr(controller, '%s_schema_manager' % name))
+
+ delattr(FakeExt, 'get_server_%s_schema' % name)
+ return getattr(controller, 'schema_server_%s' % name)
+
+ def test_load_create_extension_point(self):
+ # The expected schema is the combination of the base create schema
+ # and the disk_config extension schema, because of the above
+ # extensions_whitelist.
+ expected_schema = copy.deepcopy(servers_schema.base_create)
+ expected_schema['properties']['server']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('create')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_update_extension_point(self):
+ # The disk_config extension also extends the update schema, so its
+ # properties are merged into the base update schema here.
+ expected_schema = copy.deepcopy(servers_schema.base_update)
+ expected_schema['properties']['server']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('update')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_rebuild_extension_point(self):
+ # The disk_config extension also extends the rebuild schema, so its
+ # properties are merged into the base rebuild schema here.
+ expected_schema = copy.deepcopy(servers_schema.base_rebuild)
+ expected_schema['properties']['rebuild']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('rebuild')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_resize_extension_point(self):
+ # The disk_config extension also extends the resize schema, so its
+ # properties are merged into the base resize schema here.
+ expected_schema = copy.deepcopy(servers_schema.base_resize)
+ expected_schema['properties']['resize']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('resize')
+ self.assertEqual(expected_schema, actual_schema)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py
new file mode 100644
index 0000000000..072992cbb6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py
@@ -0,0 +1,453 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import calendar
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+import webob.exc
+
+from nova.api.openstack.compute.plugins.v3 import services
+from nova import availability_zones
+from nova.compute import cells_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.servicegroup.drivers import db as db_driver
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+
+
+fake_services_list = [
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host1',
+ id=1,
+ disabled=True,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test1'),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host1',
+ id=2,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test2'),
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host2',
+ id=3,
+ disabled=False,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason=None),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host2',
+ id=4,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason='test4'),
+ ]
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"binary": "nova-compute"}
+
+
+class FakeRequestWithHost(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1"}
+
+
+class FakeRequestWithHostService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1", "binary": "nova-compute"}
+
+
+def fake_service_get_all(services):
+ def service_get_all(context, filters=None, set_zones=False):
+ if set_zones or 'availability_zone' in filters:
+ return availability_zones.set_availability_zones(context,
+ services)
+ return services
+ return service_get_all
+
+
+def fake_db_api_service_get_all(context, disabled=None):
+ return fake_services_list
+
+
+def fake_db_service_get_by_host_binary(services):
+ def service_get_by_host_binary(context, host, binary):
+ for service in services:
+ if service['host'] == host and service['binary'] == binary:
+ return service
+ raise exception.HostBinaryNotFound(host=host, binary=binary)
+ return service_get_by_host_binary
+
+
+def fake_service_get_by_host_binary(context, host, binary):
+ fake = fake_db_service_get_by_host_binary(fake_services_list)
+ return fake(context, host, binary)
+
+
+def _service_get_by_id(services, value):
+ for service in services:
+ if service['id'] == value:
+ return service
+ return None
+
+
+def fake_db_service_update(services):
+ def service_update(context, service_id, values):
+ service = _service_get_by_id(services, service_id)
+ if service is None:
+ raise exception.ServiceNotFound(service_id=service_id)
+ return service
+ return service_update
+
+
+def fake_service_update(context, service_id, values):
+ fake = fake_db_service_update(fake_services_list)
+ return fake(context, service_id, values)
+
+
+def fake_utcnow():
+ return datetime.datetime(2012, 10, 29, 13, 42, 11)
+
+
+fake_utcnow.override_time = None
+
+
+def fake_utcnow_ts():
+ d = fake_utcnow()
+ return calendar.timegm(d.utctimetuple())
+
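+# NOTE: with utcnow frozen at 2012-10-29 13:42:11, the host1 services
+# (last updated seconds earlier) show up as 'up' in the expected
+# responses, while the host2 services (last updated weeks earlier) show
+# as 'down'; presumably the servicegroup driver compares the elapsed
+# time since updated_at against the configured service_down_time.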
+
+class ServicesTest(test.TestCase):
+
+ def setUp(self):
+ super(ServicesTest, self).setUp()
+
+ self.controller = services.ServiceController()
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ self.stubs.Set(self.controller.host_api, "service_get_all",
+ fake_service_get_all(fake_services_list))
+
+ self.stubs.Set(db, "service_get_by_args",
+ fake_db_service_get_by_host_binary(fake_services_list))
+ self.stubs.Set(db, "service_update",
+ fake_db_service_update(fake_services_list))
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'id': 1,
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'disabled_reason': None},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_service(self):
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ def _service_update(context, service_id, values):
+ self.assertIsNone(values['disabled_reason'])
+ return dict(test_service.fake_service, id=service_id)
+
+ self.stubs.Set(db, "service_update", _service_update)
+
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ res_dict = self.controller.update(req, "enable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'enabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_enable_with_invalid_host(self):
+ body = {'service': {'host': 'invalid',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ def test_services_enable_with_invalid_binary(self):
+ body = {'service': {'host': 'host1',
+ 'binary': 'invalid'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ # This test is just to verify that the servicegroup API gets used when
+ # calling this API.
+ def test_services_with_exception(self):
+ def dummy_is_up(self, dummy):
+ raise KeyError()
+
+ self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
+ req = FakeRequestWithHostService()
+ self.assertRaises(webob.exc.HTTPInternalServerError,
+ self.controller.index, req)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_disable_with_invalid_host(self):
+ body = {'service': {'host': 'invalid',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_with_invalid_binary(self):
+ body = {'service': {'host': 'host1',
+ 'binary': 'invalid'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_log_reason(self):
+ req = \
+ fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test-reason'}}
+ res_dict = self.controller.update(req, "disable-log-reason", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
+
+ def test_mandatory_reason_field(self):
+ req = \
+ fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, "disable-log-reason", body)
+
+ def test_invalid_reason_field(self):
+ reason = ' '
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'a' * 256
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'it\'s a valid reason.'
+ self.assertTrue(self.controller._is_valid_as_reason(reason))
+
+ def test_services_delete(self):
+ request = fakes.HTTPRequestV3.blank('/v3/os-services/1',
+ use_admin_context=True)
+ request.method = 'DELETE'
+
+ with mock.patch.object(self.controller.host_api,
+ 'service_delete') as service_delete:
+ self.controller.delete(request, '1')
+ service_delete.assert_called_once_with(
+ request.environ['nova.context'], '1')
+ self.assertEqual(self.controller.delete.wsgi_code, 204)
+
+ def test_services_delete_not_found(self):
+ request = fakes.HTTPRequestV3.blank('/v3/os-services/abc',
+ use_admin_context=True)
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, 'abc')
+
+
+class ServicesCellsTest(test.TestCase):
+ def setUp(self):
+ super(ServicesCellsTest, self).setUp()
+
+ host_api = cells_api.HostAPI()
+
+ self.controller = services.ServiceController()
+ self.controller.host_api = host_api
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ services_list = []
+ for service in fake_services_list:
+ service = service.copy()
+ service['id'] = 'cell1@%d' % service['id']
+ services_list.append(service)
+
+ self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
+ fake_service_get_all(services_list))
+
+ def test_services_detail(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ utc = iso8601.iso8601.Utc()
+ response = {'services': [
+ {'id': 'cell1@1',
+ 'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
+ tzinfo=utc),
+ 'disabled_reason': 'test1'},
+ {'id': 'cell1@2',
+ 'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
+ tzinfo=utc),
+ 'disabled_reason': 'test2'},
+ {'id': 'cell1@3',
+ 'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
+ tzinfo=utc),
+ 'disabled_reason': None},
+ {'id': 'cell1@4',
+ 'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
+ tzinfo=utc),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py
new file mode 100644
index 0000000000..b0b71a0229
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py
@@ -0,0 +1,48 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import suspend_server
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class SuspendServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(SuspendServerTests, self).setUp()
+ self.controller = suspend_server.SuspendServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(suspend_server, 'SuspendServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-suspend-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_suspend_resume(self):
+ self._test_actions(['suspend', 'resume'])
+
+ def test_suspend_resume_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['suspend', 'resume'])
+
+ def test_suspend_resume_raise_conflict_on_invalid_state(self):
+ self._test_actions_raise_conflict_on_invalid_state(['suspend',
+ 'resume'])
+
+ def test_actions_with_locked_instance(self):
+ self._test_actions_with_locked_instance(['suspend', 'resume'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py
new file mode 100644
index 0000000000..0e10c283f7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py
@@ -0,0 +1,195 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import datetime
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack.compute.plugins.v3 import user_data
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+CONF = cfg.CONF
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+class ServersControllerCreateTest(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', 'os-user-data',
+ 'osapi_v3')
+ self.no_user_data_controller = servers.ServersController(
+ extension_info=ext_info)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ user_data.ATTRIBUTE_NAME: None,
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+
+ def _test_create_extra(self, params, no_image=False,
+ override_controller=None):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ if no_image:
+ server.pop('imageRef', None)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+ return server
+
+ def test_create_instance_with_user_data_disabled(self):
+ params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('user_data', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(
+ params,
+ override_controller=self.no_user_data_controller)
+
+ def test_create_instance_with_user_data_enabled(self):
+ params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIn('user_data', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_user_data(self):
+ value = base64.b64encode("A random string")
+ params = {user_data.ATTRIBUTE_NAME: value}
+ server = self._test_create_extra(params)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_user_data(self):
+ value = "A random string"
+ params = {user_data.ATTRIBUTE_NAME: value}
+ self.assertRaises(exception.ValidationError,
+ self._test_create_extra, params)
diff --git a/nova/tests/unit/api/openstack/compute/schemas/__init__.py b/nova/tests/unit/api/openstack/compute/schemas/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/schemas/test_schemas.py b/nova/tests/unit/api/openstack/compute/schemas/test_schemas.py
new file mode 100644
index 0000000000..c6ce82057e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/test_schemas.py
@@ -0,0 +1,106 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import glob
+import os
+
+import lxml.etree
+
+from nova import test
+
+SCHEMAS = "nova/api/openstack/compute/schemas"
+
+
+class RelaxNGSchemaTestCase(test.NoDBTestCase):
+ """various validation tasks for the RelaxNG schemas
+
+ lxml.etree has no built-in way to validate an entire namespace
+ (i.e., multiple RelaxNG schema files defining elements in the same
+ namespace), so we define a few tests that should hopefully reduce
+ the risk of an inconsistent namespace
+ """
+
+ def _load_schema(self, schemafile):
+ return lxml.etree.RelaxNG(lxml.etree.parse(schemafile))
+
+ def _load_test_cases(self, path):
+ """load test cases from the given path."""
+ rv = dict(valid=[], invalid=[])
+ path = os.path.join(os.path.dirname(__file__), path)
+ for ctype in rv.keys():
+ for cfile in glob.glob(os.path.join(path, ctype, "*.xml")):
+ rv[ctype].append(lxml.etree.parse(cfile))
+ return rv
+
+ def _validate_schema(self, schemafile):
+ """validate a single RelaxNG schema file."""
+ try:
+ self._load_schema(schemafile)
+ except lxml.etree.RelaxNGParseError as err:
+ self.fail("%s is not a valid RelaxNG schema: %s" %
+ (schemafile, err))
+
+ def _api_versions(self):
+ """get a list of API versions."""
+ return [''] + [os.path.basename(v)
+ for v in glob.glob(os.path.join(SCHEMAS, "v*"))]
+
+ def _schema_files(self, api_version):
+ return glob.glob(os.path.join(SCHEMAS, api_version, "*.rng"))
+
+ def test_schema_validity(self):
+ for api_version in self._api_versions():
+ for schema in self._schema_files(api_version):
+ self._validate_schema(schema)
+
+ def test_schema_duplicate_elements(self):
+ for api_version in self._api_versions():
+ elements = dict()
+ duplicates = dict()
+ for schemafile in self._schema_files(api_version):
+ schema = lxml.etree.parse(schemafile)
+ fname = os.path.basename(schemafile)
+ if schema.getroot().tag != "element":
+ # we don't do any sort of validation on grammars
+ # yet
+ continue
+ el_name = schema.getroot().get("name")
+ if el_name in elements:
+ duplicates.setdefault(el_name,
+ [elements[el_name]]).append(fname)
+ else:
+ elements[el_name] = fname
+ self.assertEqual(len(duplicates), 0,
+ "Duplicate element definitions found: %s" %
+ "; ".join("%s in %s" % dup
+ for dup in duplicates.items()))
+
+ def test_schema_explicit_cases(self):
+ cases = {'v1.1/flavors.rng': self._load_test_cases("v1.1/flavors"),
+ 'v1.1/images.rng': self._load_test_cases("v1.1/images"),
+ 'v1.1/servers.rng': self._load_test_cases("v1.1/servers")}
+
+ for schemafile, caselists in cases.items():
+ schema = self._load_schema(os.path.join(SCHEMAS, schemafile))
+ for case in caselists['valid']:
+ self.assertTrue(schema.validate(case),
+ "Schema validation failed against %s: %s\n%s" %
+ (schemafile, schema.error_log, case))
+
+ for case in caselists['invalid']:
+ self.assertFalse(
+ schema.validate(case),
+ "Schema validation succeeded unexpectedly against %s: %s"
+ "\n%s" % (schemafile, schema.error_log, case))
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml
new file mode 100644
index 0000000000..df4368bf41
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- you cannot mix flavor references (i.e., with only name and id)
+ and specifications (with all attributes) in the same document
+ -->
+ <flavor name="foo" id="foo"/>
+ <flavor name="bar" id="bar" ram="bar" disk="bar" vcpus="bar"/>
+</flavors>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml
new file mode 100644
index 0000000000..3343a7be59
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- this flavor is only partially specified -->
+ <flavor name="foo"/>
+</flavors>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml
new file mode 100644
index 0000000000..f67c5a82fe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- a flavor can either have *only* name and id, or needs to also
+ have disk and vcpus in addition to ram -->
+ <flavor name="foo" id="foo" ram="foo"/>
+</flavors>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml
new file mode 100644
index 0000000000..36aa3936e7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml
new file mode 100644
index 0000000000..59eafc8608
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor name="foo" id="foo" ram="foo" disk="foo" vcpus="foo"/>
+</flavors>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml
new file mode 100644
index 0000000000..751b626258
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<flavors xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor name="foo" id="foo"/>
+ <flavor name="bar" id="bar"/>
+</flavors>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml
new file mode 100644
index 0000000000..8f7bf208ae
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- cannot mix refs and specs in the same document -->
+ <image name="foo" id="foo"/>
+ <image name="bar" id="bar" updated="1401991486" created="1401991486"
+ status="foo">
+ <metadata/>
+ </image>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml
new file mode 100644
index 0000000000..435294e27c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- image specs require a metadata child -->
+ <image name="foo" id="foo" updated="1401991486" created="1401991486"
+ status="foo"/>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml
new file mode 100644
index 0000000000..5637cce787
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- image refs require id -->
+ <image name="foo"/>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml
new file mode 100644
index 0000000000..db5e974621
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- an image must be either a ref, with *only* name and id attrs,
+ or fully specified, with name, id, updated, created, status,
+ and a metadata child -->
+ <image name="foo" id="foo" updated="foo"/>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml
new file mode 100644
index 0000000000..05e0b8241c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml
new file mode 100644
index 0000000000..4f148db625
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image name="foo" id="foo" updated="1401991486" created="1401991486"
+ status="foo">
+ <metadata/>
+ </image>
+ <image name="bar" id="bar" updated="1401991486" created="1401991486"
+ status="bar" progress="bar" minDisk="100" minRam="100">
+ <server id="bar"/>
+ <metadata>
+ <meta key="baz">baz</meta>
+ </metadata>
+ </image>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml
new file mode 100644
index 0000000000..1dfedd2c77
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image name="foo" id="foo"/>
+ <image name="bar" id="bar"/>
+</images>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml
new file mode 100644
index 0000000000..c941472beb
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <!-- you cannot mix server refs and specs in the same document -->
+ <server name="foo" id="foo"/>
+ <server name="foo" userId="foo" tenantId="foo" id="foo" updated="1401991486"
+ created="1401991486" hostId="foo" accessIPv4="1.2.3.4"
+ accessIPv6="::1" status="foo">
+ <image id="foo">
+ <atom:link href="/compute/api/v1.1/image/foo"/>
+ </image>
+ <flavor id="foo">
+ <atom:link href="/compute/api/v1.1/flavor/foo"/>
+ </flavor>
+ <metadata/>
+ <addresses/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml
new file mode 100644
index 0000000000..721ce84327
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- server refs require the id attr -->
+ <server name="foo"/>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml
new file mode 100644
index 0000000000..474b3a084e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- server tags must either be refs, with *only* name and id, or
+ full specifications, with loads more detail -->
+ <server name="foo" id="foo" updated="foo"/>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml
new file mode 100644
index 0000000000..6455fe899a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <!-- the server specification requires a number of children -->
+ <server name="foo" userId="foo" tenantId="foo" id="foo" updated="1401991486"
+ created="1401991486" hostId="foo" accessIPv4="1.2.3.4"
+ accessIPv6="::1" status="foo"/>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml
new file mode 100644
index 0000000000..97f5ee44e6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <server name="bar" userId="bar" tenantId="bar" id="bar" updated="1401991486"
+ created="1401991486" hostId="bar" accessIPv4="1.2.3.4"
+ accessIPv6="::1" status="bar" progress="10" adminPass="bar">
+ <image id="foo">
+ <atom:link href="/compute/api/v1.1/image/foo"/>
+ </image>
+ <flavor id="foo">
+ <atom:link href="/compute/api/v1.1/flavor/foo"/>
+ </flavor>
+ <fault code="1" created="1401991486">
+ <message>fault</message>
+ <details>fault</details>
+ </fault>
+ <metadata>
+ <meta key="bar">bar</meta>
+ </metadata>
+ <addresses>
+ <network id="bar"/>
+ <network id="baz">
+ <ip version="4" addr="1.2.3.4"/>
+ </network>
+ </addresses>
+ </server>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml
new file mode 100644
index 0000000000..b2f3666245
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml
new file mode 100644
index 0000000000..fbd6202a76
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <server name="foo" userId="foo" tenantId="foo" id="foo" updated="1401991486"
+ created="1401991486" hostId="foo" accessIPv4="1.2.3.4"
+ accessIPv6="::1" status="foo">
+ <image id="foo">
+ <atom:link href="/compute/api/v1.1/image/foo"/>
+ </image>
+ <flavor id="foo">
+ <atom:link href="/compute/api/v1.1/flavor/foo"/>
+ </flavor>
+ <metadata/>
+ <addresses/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml
new file mode 100644
index 0000000000..e1212e985f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<servers xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="foo" id="foo"/>
+ <server name="bar" id="bar"/>
+</servers>
diff --git a/nova/tests/unit/api/openstack/compute/test_api.py b/nova/tests/unit/api/openstack/compute/test_api.py
new file mode 100644
index 0000000000..f86c04d4bd
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_api.py
@@ -0,0 +1,186 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+import webob.dec
+import webob.exc
+
+from nova.api import openstack as openstack_api
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class APITest(test.NoDBTestCase):
+
+ def _wsgi_app(self, inner_app):
+ # simpler version of the app than fakes.wsgi_app
+ return openstack_api.FaultWrapper(inner_app)
+
+ def test_malformed_json(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '{'
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_malformed_xml(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '<hi im not xml>'
+ req.headers["content-type"] = "application/xml"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_vendor_content_type_json(self):
+ ctype = 'application/vnd.openstack.compute+json'
+
+ req = webob.Request.blank('/')
+ req.headers['Accept'] = ctype
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, ctype)
+
+ jsonutils.loads(res.body)
+
+ def test_vendor_content_type_xml(self):
+ ctype = 'application/vnd.openstack.compute+xml'
+
+ req = webob.Request.blank('/')
+ req.headers['Accept'] = ctype
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, ctype)
+
+ etree.XML(res.body)
+
+ def test_exceptions_are_converted_to_faults_webob_exc(self):
+ @webob.dec.wsgify
+ def raise_webob_exc(req):
+ raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+
+ # api.application = raise_webob_exc
+ api = self._wsgi_app(raise_webob_exc)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ def test_exceptions_are_converted_to_faults_api_fault(self):
+ @webob.dec.wsgify
+ def raise_api_fault(req):
+ exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+ return wsgi.Fault(exc)
+
+ # api.application = raise_api_fault
+ api = self._wsgi_app(raise_api_fault)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('itemNotFound', resp.body)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ def test_exceptions_are_converted_to_faults_exception(self):
+ @webob.dec.wsgify
+ def fail(req):
+ raise Exception("Threw an exception")
+
+ # api.application = fail
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('{"computeFault', resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def test_exceptions_are_converted_to_faults_exception_xml(self):
+ @webob.dec.wsgify
+ def fail(req):
+ raise Exception("Threw an exception")
+
+ # api.application = fail
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/.xml').get_response(api)
+ self.assertIn('<computeFault', resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def _do_test_exception_safety_reflected_in_faults(self, expose):
+ class ExceptionWithSafety(exception.NovaException):
+ safe = expose
+
+ @webob.dec.wsgify
+ def fail(req):
+ raise ExceptionWithSafety('some explanation')
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('{"computeFault', resp.body)
+ expected = ('ExceptionWithSafety: some explanation' if expose else
+ 'The server has either erred or is incapable '
+ 'of performing the requested operation.')
+ self.assertIn(expected, resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def test_safe_exceptions_are_described_in_faults(self):
+ self._do_test_exception_safety_reflected_in_faults(True)
+
+ def test_unsafe_exceptions_are_not_described_in_faults(self):
+ self._do_test_exception_safety_reflected_in_faults(False)
+
+ def _do_test_exception_mapping(self, exception_type, msg):
+ @webob.dec.wsgify
+ def fail(req):
+ raise exception_type(msg)
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn(msg, resp.body)
+ self.assertEqual(resp.status_int, exception_type.code, resp.body)
+
+ if hasattr(exception_type, 'headers'):
+ for (key, value) in exception_type.headers.iteritems():
+ self.assertIn(key, resp.headers)
+ self.assertEqual(resp.headers[key], str(value))
+
+ def test_quota_error_mapping(self):
+ self._do_test_exception_mapping(exception.QuotaError, 'too many used')
+
+ def test_non_nova_notfound_exception_mapping(self):
+ class ExceptionWithCode(Exception):
+ code = 404
+
+ self._do_test_exception_mapping(ExceptionWithCode,
+ 'NotFound')
+
+ def test_non_nova_exception_mapping(self):
+ class ExceptionWithCode(Exception):
+ code = 417
+
+ self._do_test_exception_mapping(ExceptionWithCode,
+ 'Expectation failed')
+
+ def test_exception_with_none_code_throws_500(self):
+ class ExceptionWithNoneCode(Exception):
+ code = None
+
+ @webob.dec.wsgify
+ def fail(req):
+ raise ExceptionWithNoneCode()
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertEqual(500, resp.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/test_auth.py b/nova/tests/unit/api/openstack/compute/test_auth.py
new file mode 100644
index 0000000000..0386623b5d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_auth.py
@@ -0,0 +1,61 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNoAuthMiddleware(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNoAuthMiddleware, self).setUp()
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_networking(self.stubs)
+
+ def test_authorize_user(self):
+ req = webob.Request.blank('/v2')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/user1_project")
+
+ def test_authorize_user_trailing_slash(self):
+ # make sure it works with trailing slash on the request
+ req = webob.Request.blank('/v2/')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/user1_project")
+
+ def test_auth_token_no_empty_headers(self):
+ req = webob.Request.blank('/v2')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertNotIn('X-CDN-Management-Url', result.headers)
+ self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/unit/api/openstack/compute/test_consoles.py b/nova/tests/unit/api/openstack/compute/test_consoles.py
new file mode 100644
index 0000000000..3ba99899c0
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_consoles.py
@@ -0,0 +1,293 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid as stdlib_uuid
+
+from lxml import etree
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute import consoles
+from nova.compute import vm_states
+from nova import console
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+class FakeInstanceDB(object):
+
+ def __init__(self):
+ self.instances_by_id = {}
+ self.ids_by_uuid = {}
+ self.max_id = 0
+
+ def return_server_by_id(self, context, id):
+ if id not in self.instances_by_id:
+ self._add_server(id=id)
+ return dict(self.instances_by_id[id])
+
+ def return_server_by_uuid(self, context, uuid):
+ if uuid not in self.ids_by_uuid:
+ self._add_server(uuid=uuid)
+ return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
+
+ def _add_server(self, id=None, uuid=None):
+ if id is None:
+ id = self.max_id + 1
+ if uuid is None:
+ uuid = str(stdlib_uuid.uuid4())
+ instance = stub_instance(id, uuid=uuid)
+ self.instances_by_id[id] = instance
+ self.ids_by_uuid[uuid] = id
+ if id > self.max_id:
+ self.max_id = id
+
+
+def stub_instance(id, user_id='fake', project_id='fake', host=None,
+ vm_state=None, task_state=None,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", name=None, key_name='',
+ access_ipv4=None, access_ipv6=None, progress=0):
+
+ if host is not None:
+ host = str(host)
+
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
+ # ReservationID isn't sent back, hack it in there.
+ server_name = name or "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
+ instance = {
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "admin_pass": "",
+ "user_id": user_id,
+ "project_id": project_id,
+ "image_ref": image_ref,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": key_name,
+ "key_data": key_data,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
+ "memory_mb": 0,
+ "vcpus": 0,
+ "root_gb": 0,
+ "hostname": "",
+ "host": host,
+ "instance_type": {},
+ "user_data": "",
+ "reservation_id": reservation_id,
+ "mac_address": "",
+ "scheduled_at": timeutils.utcnow(),
+ "launched_at": timeutils.utcnow(),
+ "terminated_at": timeutils.utcnow(),
+ "availability_zone": "",
+ "display_name": server_name,
+ "display_description": "",
+ "locked": False,
+ "metadata": [],
+ "access_ip_v4": access_ipv4,
+ "access_ip_v6": access_ipv6,
+ "uuid": uuid,
+ "progress": progress}
+
+ return instance
+
+
+class ConsolesControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ConsolesControllerTest, self).setUp()
+ self.flags(verbose=True)
+ self.instance_db = FakeInstanceDB()
+ self.stubs.Set(db, 'instance_get',
+ self.instance_db.return_server_by_id)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ self.instance_db.return_server_by_uuid)
+ self.uuid = str(stdlib_uuid.uuid4())
+ self.url = '/v2/fake/servers/%s/consoles' % self.uuid
+ self.controller = consoles.Controller()
+
+ def test_create_console(self):
+ def fake_create_console(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+ return {}
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller.create(req, self.uuid, None)
+
+ def test_show_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool, instance_name='inst-0001')
+
+ expected = {'console': {'id': 20,
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'password': 'fake_password',
+ 'instance_name': 'inst-0001',
+ 'console_type': 'fake_type'}}
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ res_dict = self.controller.show(req, self.uuid, '20')
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_show_console_unknown_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_show_console_unknown_instance(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_list_consoles(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+
+ pool1 = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ cons1 = dict(id=10, password='fake_password',
+ port='fake_port', pool=pool1)
+ pool2 = dict(console_type='fake_type2',
+ public_hostname='fake_hostname2')
+ cons2 = dict(id=11, password='fake_password2',
+ port='fake_port2', pool=pool2)
+ return [cons1, cons2]
+
+ expected = {'consoles':
+ [{'console': {'id': 10, 'console_type': 'fake_type'}},
+ {'console': {'id': 11, 'console_type': 'fake_type2'}}]}
+
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req, self.uuid)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_delete_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool)
+
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.controller.delete(req, self.uuid, '20')
+
+ def test_delete_console_unknown_console(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+ def test_delete_console_unknown_instance(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+
+class TestConsolesXMLSerializer(test.NoDBTestCase):
+ def test_show(self):
+ fixture = {'console': {'id': 20,
+ 'password': 'fake_password',
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'console_type': 'fake_type'}}
+
+ output = consoles.ConsoleTemplate().serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'console')
+ self.assertEqual(res_tree.xpath('id')[0].text, '20')
+ self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
+ self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
+ self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
+ self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
+
+ def test_index(self):
+ fixture = {'consoles': [{'console': {'id': 10,
+ 'console_type': 'fake_type'}},
+ {'console': {'id': 11,
+ 'console_type': 'fake_type2'}}]}
+
+ output = consoles.ConsolesTemplate().serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'consoles')
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, 'console')
+ self.assertEqual(res_tree[1].tag, 'console')
+ self.assertEqual(len(res_tree[0]), 1)
+ self.assertEqual(res_tree[0][0].tag, 'console')
+ self.assertEqual(len(res_tree[1]), 1)
+ self.assertEqual(res_tree[1][0].tag, 'console')
+ self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
+ self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
+ self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
+ 'fake_type')
+ self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
+ 'fake_type2')
diff --git a/nova/tests/unit/api/openstack/compute/test_extensions.py b/nova/tests/unit/api/openstack/compute/test_extensions.py
new file mode 100644
index 0000000000..cf84fc1f84
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_extensions.py
@@ -0,0 +1,747 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import iso8601
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import extensions as compute_extensions
+from nova.api.openstack import extensions as base_extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import exception
+import nova.policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+CONF = cfg.CONF
+
+NS = "{http://docs.openstack.org/common/api/v1.0}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+response_body = "Try to say this Mr. Knox, sir..."
+extension_body = "I am not a fox!"
+
+
+class StubController(object):
+
+ def __init__(self, body):
+ self.body = body
+
+ def index(self, req):
+ return self.body
+
+ def create(self, req, body):
+ msg = 'All aboard the fail train!'
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ def show(self, req, id):
+ raise webob.exc.HTTPNotFound()
+
+
+class StubActionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return self.body
+
+
+class StubControllerExtension(base_extensions.ExtensionDescriptor):
+ name = 'twaadle'
+
+ def __init__(self):
+ pass
+
+
+class StubEarlyExtensionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.extends
+ def index(self, req):
+ yield self.body
+
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, id, body):
+ yield self.body
+
+
+class StubLateExtensionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.extends
+ def index(self, req, resp_obj):
+ return self.body
+
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, resp_obj, id, body):
+ return self.body
+
+
+class StubExtensionManager(object):
+ """Provides access to Tweedle Beetles."""
+
+ name = "Tweedle Beetle Extension"
+ alias = "TWDLBETL"
+
+ def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
+ controller_ext=None):
+ self.resource_ext = resource_ext
+ self.action_ext = action_ext
+ self.request_ext = request_ext
+ self.controller_ext = controller_ext
+ self.extra_resource_ext = None
+
+ def get_resources(self):
+ resource_exts = []
+ if self.resource_ext:
+ resource_exts.append(self.resource_ext)
+ if self.extra_resource_ext:
+ resource_exts.append(self.extra_resource_ext)
+ return resource_exts
+
+ def get_actions(self):
+ action_exts = []
+ if self.action_ext:
+ action_exts.append(self.action_ext)
+ return action_exts
+
+ def get_request_extensions(self):
+ request_extensions = []
+ if self.request_ext:
+ request_extensions.append(self.request_ext)
+ return request_extensions
+
+ def get_controller_extensions(self):
+ controller_extensions = []
+ if self.controller_ext:
+ controller_extensions.append(self.controller_ext)
+ return controller_extensions
+
+
+class ExtensionTestCase(test.TestCase):
+ def setUp(self):
+ super(ExtensionTestCase, self).setUp()
+ ext_list = CONF.osapi_compute_extension[:]
+ fox = ('nova.tests.unit.api.openstack.compute.extensions.'
+ 'foxinsocks.Foxinsocks')
+ if fox not in ext_list:
+ ext_list.append(fox)
+ self.flags(osapi_compute_extension=ext_list)
+ self.fake_context = nova.context.RequestContext('fake', 'fake')
+
+ def test_extension_authorizer_throws_exception_if_policy_fails(self):
+ target = {'project_id': '1234',
+ 'user_id': '5678'}
+ self.mox.StubOutWithMock(nova.policy, 'enforce')
+ nova.policy.enforce(self.fake_context,
+ "compute_extension:used_limits_for_admin",
+ target).AndRaise(
+ exception.PolicyNotAuthorized(
+ action="compute_extension:used_limits_for_admin"))
+ self.mox.ReplayAll()
+ authorize = base_extensions.extension_authorizer('compute',
+ 'used_limits_for_admin'
+ )
+ self.assertRaises(exception.PolicyNotAuthorized, authorize,
+ self.fake_context, target=target)
+
+ def test_core_authorizer_throws_exception_if_policy_fails(self):
+ target = {'project_id': '1234',
+ 'user_id': '5678'}
+ self.mox.StubOutWithMock(nova.policy, 'enforce')
+ nova.policy.enforce(self.fake_context,
+ "compute:used_limits_for_admin",
+ target).AndRaise(
+ exception.PolicyNotAuthorized(
+ action="compute:used_limits_for_admin"))
+ self.mox.ReplayAll()
+ authorize = base_extensions.core_authorizer('compute',
+ 'used_limits_for_admin'
+ )
+ self.assertRaises(exception.PolicyNotAuthorized, authorize,
+ self.fake_context, target=target)
+
+
+class ExtensionControllerTest(ExtensionTestCase):
+
+ def setUp(self):
+ super(ExtensionControllerTest, self).setUp()
+ self.ext_list = [
+ "AdminActions",
+ "Aggregates",
+ "AssistedVolumeSnapshots",
+ "AvailabilityZone",
+ "Agents",
+ "Certificates",
+ "Cloudpipe",
+ "CloudpipeUpdate",
+ "ConsoleOutput",
+ "Consoles",
+ "Createserverext",
+ "DeferredDelete",
+ "DiskConfig",
+ "ExtendedAvailabilityZone",
+ "ExtendedFloatingIps",
+ "ExtendedIps",
+ "ExtendedIpsMac",
+ "ExtendedVIFNet",
+ "Evacuate",
+ "ExtendedStatus",
+ "ExtendedVolumes",
+ "ExtendedServerAttributes",
+ "FixedIPs",
+ "FlavorAccess",
+ "FlavorDisabled",
+ "FlavorExtraSpecs",
+ "FlavorExtraData",
+ "FlavorManage",
+ "FlavorRxtx",
+ "FlavorSwap",
+ "FloatingIps",
+ "FloatingIpDns",
+ "FloatingIpPools",
+ "FloatingIpsBulk",
+ "Fox In Socks",
+ "Hosts",
+ "ImageSize",
+ "InstanceActions",
+ "Keypairs",
+ "Multinic",
+ "MultipleCreate",
+ "QuotaClasses",
+ "Quotas",
+ "ExtendedQuotas",
+ "Rescue",
+ "SchedulerHints",
+ "SecurityGroupDefaultRules",
+ "SecurityGroups",
+ "ServerDiagnostics",
+ "ServerListMultiStatus",
+ "ServerPassword",
+ "ServerStartStop",
+ "Services",
+ "SimpleTenantUsage",
+ "UsedLimits",
+ "UserData",
+ "VirtualInterfaces",
+ "VolumeAttachmentUpdate",
+ "Volumes",
+ ]
+ self.ext_list.sort()
+
+ def test_list_extensions_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ # Make sure we have all the extensions, extra extensions being OK.
+ data = jsonutils.loads(response.body)
+ names = [str(x['name']) for x in data['extensions']
+ if str(x['name']) in self.ext_list]
+ names.sort()
+ self.assertEqual(names, self.ext_list)
+
+ # Ensure all the timestamps are valid according to iso8601
+ for ext in data['extensions']:
+ iso8601.parse_date(ext['updated'])
+
+        # Make sure that at least Fox In Socks is correct.
+ (fox_ext, ) = [
+ x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
+ self.assertEqual(fox_ext, {
+ 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
+ 'name': 'Fox In Socks',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'The Fox In Socks Extension.',
+ 'alias': 'FOXNSOX',
+ 'links': []
+ },
+ )
+
+ for ext in data['extensions']:
+ url = '/fake/extensions/%s' % ext['alias']
+ request = webob.Request.blank(url)
+ response = request.get_response(app)
+ output = jsonutils.loads(response.body)
+ self.assertEqual(output['extension']['alias'], ext['alias'])
+
+ def test_get_extension_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions/FOXNSOX")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ data = jsonutils.loads(response.body)
+ self.assertEqual(data['extension'], {
+ "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
+ "name": "Fox In Socks",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "The Fox In Socks Extension.",
+ "alias": "FOXNSOX",
+ "links": []})
+
+ def test_get_non_existing_extension_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions/4")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+
+ def test_list_extensions_xml(self):
+ app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
+ request = webob.Request.blank("/fake/extensions")
+ request.accept = "application/xml"
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ root = etree.XML(response.body)
+ self.assertEqual(root.tag.split('extensions')[0], NS)
+
+        # Make sure we have all the extensions, extra extensions being OK.
+ exts = root.findall('{0}extension'.format(NS))
+ self.assertTrue(len(exts) >= len(self.ext_list))
+
+        # Make sure that at least Fox In Socks is correct.
+ (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
+ self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
+ self.assertEqual(fox_ext.get('namespace'),
+ 'http://www.fox.in.socks/api/ext/pie/v1.0')
+ self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
+ self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
+ 'The Fox In Socks Extension.')
+
+ xmlutil.validate_schema(root, 'extensions')
+
+ def test_get_extension_xml(self):
+ app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
+ request = webob.Request.blank("/fake/extensions/FOXNSOX")
+ request.accept = "application/xml"
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ xml = response.body
+
+ root = etree.XML(xml)
+ self.assertEqual(root.tag.split('extension')[0], NS)
+ self.assertEqual(root.get('alias'), 'FOXNSOX')
+ self.assertEqual(root.get('name'), 'Fox In Socks')
+ self.assertEqual(root.get('namespace'),
+ 'http://www.fox.in.socks/api/ext/pie/v1.0')
+ self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
+ self.assertEqual(root.findtext('{0}description'.format(NS)),
+ 'The Fox In Socks Extension.')
+
+ xmlutil.validate_schema(root, 'extension')
+
+
+class ResourceExtensionTest(ExtensionTestCase):
+
+ def test_no_extension_present(self):
+ manager = StubExtensionManager(None)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/blah")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+
+ def test_get_resources(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_get_resources_with_controller(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_bad_request(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ request.method = "POST"
+ response = request.get_response(app)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "All aboard the fail train!",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+ def test_non_exist_resource(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/1")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "itemNotFound": {
+ "message": "The resource could not be found.",
+ "code": 404
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+
+class InvalidExtension(object):
+
+ alias = "THIRD"
+
+
+class ExtensionManagerTest(ExtensionTestCase):
+
+ response_body = "Try to say this Mr. Knox, sir..."
+
+ def test_get_resources(self):
+ app = compute.APIRouter()
+ request = webob.Request.blank("/fake/foxnsocks")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_invalid_extensions(self):
+ # Don't need the serialization middleware here because we're
+ # not testing any serialization
+ compute.APIRouter()
+ ext_mgr = compute_extensions.ExtensionManager()
+ ext_mgr.register(InvalidExtension())
+ self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
+ self.assertFalse(ext_mgr.is_loaded('THIRD'))
+
+
+class ActionExtensionTest(ExtensionTestCase):
+
+ def _send_server_action_request(self, url, body):
+ app = compute.APIRouter(init_only=('servers',))
+ request = webob.Request.blank(url)
+ request.method = 'POST'
+ request.content_type = 'application/json'
+ request.body = jsonutils.dumps(body)
+ response = request.get_response(app)
+ return response
+
+ def test_extended_action(self):
+ body = dict(add_tweedle=dict(name="test"))
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Added.", response.body)
+
+ body = dict(delete_tweedle=dict(name="test"))
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Deleted.", response.body)
+
+ def test_invalid_action(self):
+ body = dict(blah=dict(name="test")) # Doesn't exist
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "There is no such action: blah",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+ def test_non_exist_action(self):
+ body = dict(blah=dict(name="test"))
+ url = "/fake/fdsa/1/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(404, response.status_int)
+
+ def test_failed_action(self):
+ body = dict(fail=dict(name="test"))
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "Tweedle fail",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+
+class RequestExtensionTest(ExtensionTestCase):
+
+ def test_get_resources_with_stub_mgr(self):
+ class GooGoose(wsgi.Controller):
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ # only handle JSON responses
+ resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
+
+ req_ext = base_extensions.ControllerExtension(
+ StubControllerExtension(), 'flavors', GooGoose())
+
+ manager = StubExtensionManager(None, None, None, req_ext)
+ app = fakes.wsgi_app(ext_mgr=manager)
+ request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
+ request.environ['api.version'] = '2'
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ response_data = jsonutils.loads(response.body)
+ self.assertEqual('bluegoo', response_data['flavor']['googoose'])
+
+ def test_get_resources_with_mgr(self):
+
+ app = fakes.wsgi_app(init_only=('flavors',))
+ request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
+ request.environ['api.version'] = '2'
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ response_data = jsonutils.loads(response.body)
+ self.assertEqual('newblue', response_data['flavor']['googoose'])
+ self.assertEqual("Pig Bands!", response_data['big_bands'])
+
+
+class ControllerExtensionTest(ExtensionTestCase):
+ def test_controller_extension_early(self):
+ controller = StubController(response_body)
+ res_ext = base_extensions.ResourceExtension('tweedles', controller)
+ ext_controller = StubEarlyExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_extension_late(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubController(dict(foo=response_body))
+ res_ext = base_extensions.ResourceExtension('tweedles', controller)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_extension_late_inherited_resource(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubController(dict(foo=response_body))
+ parent_ext = base_extensions.ResourceExtension('tweedles', controller)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=parent_ext,
+ controller_ext=cont_ext)
+ child_ext = base_extensions.ResourceExtension('beetles', controller,
+ inherits='tweedles')
+ manager.extra_resource_ext = child_ext
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/beetles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_action_extension_early(self):
+ controller = StubActionController(response_body)
+ actions = dict(action='POST')
+ res_ext = base_extensions.ResourceExtension('tweedles', controller,
+ member_actions=actions)
+ ext_controller = StubEarlyExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/foo/action")
+ request.method = 'POST'
+ request.headers['Content-Type'] = 'application/json'
+ request.body = jsonutils.dumps(dict(fooAction=True))
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_action_extension_late(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubActionController(dict(foo=response_body))
+ actions = dict(action='POST')
+ res_ext = base_extensions.ResourceExtension('tweedles', controller,
+ member_actions=actions)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/foo/action")
+ request.method = 'POST'
+ request.headers['Content-Type'] = 'application/json'
+ request.body = jsonutils.dumps(dict(fooAction=True))
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+
+class ExtensionsXMLSerializerTest(test.TestCase):
+
+ def test_serialize_extension(self):
+ serializer = base_extensions.ExtensionTemplate()
+ data = {'extension': {
+ 'name': 'ext1',
+ 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
+ 'alias': 'RS-PIE',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'Adds the capability to share an image.',
+ 'links': [{'rel': 'describedby',
+ 'type': 'application/pdf',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
+ {'rel': 'describedby',
+ 'type': 'application/vnd.sun.wadl+xml',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}
+
+ xml = serializer.serialize(data)
+ root = etree.XML(xml)
+ ext_dict = data['extension']
+ self.assertEqual(root.findtext('{0}description'.format(NS)),
+ ext_dict['description'])
+
+ for key in ['name', 'namespace', 'alias', 'updated']:
+ self.assertEqual(root.get(key), ext_dict[key])
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(ext_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ xmlutil.validate_schema(root, 'extension')
+
+ def test_serialize_extensions(self):
+ serializer = base_extensions.ExtensionsTemplate()
+ data = {"extensions": [{
+ "name": "Public Image Extension",
+ "namespace": "http://foo.com/api/ext/pie/v1.0",
+ "alias": "RS-PIE",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "Adds the capability to share an image.",
+ "links": [{"rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://foo.com/api/ext/cs-pie.pdf"},
+ {"rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
+ {"name": "Cloud Block Storage",
+ "namespace": "http://foo.com/api/ext/cbs/v1.0",
+ "alias": "RS-CBS",
+ "updated": "2011-01-12T11:22:33-06:00",
+ "description": "Allows mounting cloud block storage.",
+ "links": [{"rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://foo.com/api/ext/cs-cbs.pdf"},
+ {"rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}
+
+ xml = serializer.serialize(data)
+ root = etree.XML(xml)
+ ext_elems = root.findall('{0}extension'.format(NS))
+ self.assertEqual(len(ext_elems), 2)
+ for i, ext_elem in enumerate(ext_elems):
+ ext_dict = data['extensions'][i]
+ self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
+ ext_dict['description'])
+
+ for key in ['name', 'namespace', 'alias', 'updated']:
+ self.assertEqual(ext_elem.get(key), ext_dict[key])
+
+ link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(ext_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ xmlutil.validate_schema(root, 'extensions')
+
+
+class ExtensionControllerIdFormatTest(test.TestCase):
+
+ def _bounce_id(self, test_id):
+
+ class BounceController(object):
+ def show(self, req, id):
+ return id
+ res_ext = base_extensions.ResourceExtension('bounce',
+ BounceController())
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/bounce/%s" % test_id)
+ response = request.get_response(app)
+ return response.body
+
+ def test_id_with_xml_format(self):
+ result = self._bounce_id('foo.xml')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_json_format(self):
+ result = self._bounce_id('foo.json')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_bad_format(self):
+ result = self._bounce_id('foo.bad')
+ self.assertEqual(result, 'foo.bad')
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
new file mode 100644
index 0000000000..265b50ac85
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -0,0 +1,943 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import six.moves.urllib.parse as urlparse
+import webob
+
+from nova.api.openstack import common
+from nova.api.openstack.compute import flavors as flavors_v2
+from nova.api.openstack.compute.plugins.v3 import flavors as flavors_v3
+from nova.api.openstack import xmlutil
+import nova.compute.flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "ephemeral_gb": '20',
+ "swap": '10',
+ "disabled": False,
+ "vcpus": '',
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '20',
+ "ephemeral_gb": '10',
+ "swap": '5',
+ "disabled": False,
+ "vcpus": '',
+ },
+}
+
+
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ if marker in ['99999']:
+ raise exception.MarkerNotFound(marker)
+
+ def reject_min(db_attr, filter_attr):
+ return (filter_attr in filters and
+ int(flavor[db_attr]) < int(filters[filter_attr]))
+
+ filters = filters or {}
+ res = []
+ for (flavor_name, flavor) in FAKE_FLAVORS.items():
+ if reject_min('memory_mb', 'min_memory_mb'):
+ continue
+ elif reject_min('root_gb', 'min_root_gb'):
+ continue
+
+ res.append(flavor)
+
+ res = sorted(res, key=lambda item: item[sort_key])
+ output = []
+ marker_found = True if marker is None else False
+ for flavor in res:
+ if not marker_found and marker == flavor['flavorid']:
+ marker_found = True
+ elif marker_found:
+ if limit is None or len(output) < int(limit):
+ output.append(flavor)
+
+ return output
+
+
+def fake_get_limit_and_marker(request, max_limit=1):
+ params = common.get_pagination_params(request)
+ limit = params.get('limit', max_limit)
+ limit = min(max_limit, limit)
+ marker = params.get('marker')
+
+ return limit, marker
+
+
+def empty_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return []
+
+
+def return_flavor_not_found(flavor_id, ctxt=None):
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
+
+
+class FlavorsTestV21(test.TestCase):
+ _prefix = "/v3"
+ Controller = flavors_v3.FlavorsController
+ fake_request = fakes.HTTPRequestV3
+ _rspv = "v3"
+ _fake = ""
+
+ def setUp(self):
+ super(FlavorsTestV21, self).setUp()
+ self.flags(osapi_compute_extension=[])
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(nova.compute.flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+ self.controller = self.Controller()
+
+ def _set_expected_body(self, expected, ephemeral, swap, disabled):
+        # NOTE(oomichi): On the v2.1 API, some v2.0 extensions are merged
+        # in as core features, so the following parameters are returned
+        # by default.
+ expected['OS-FLV-EXT-DATA:ephemeral'] = ephemeral
+ expected['OS-FLV-DISABLED:disabled'] = disabled
+ expected['swap'] = swap
+
+ def test_get_flavor_by_invalid_id(self):
+ self.stubs.Set(nova.compute.flavors,
+ "get_flavor_by_flavor_id",
+ return_flavor_not_found)
+ req = self.fake_request.blank(self._prefix + '/flavors/asdf')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 'asdf')
+
+ def test_get_flavor_by_id(self):
+ req = self.fake_request.blank(self._prefix + '/flavors/1')
+ flavor = self.controller.show(req, '1')
+ expected = {
+ "flavor": {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ }
+ self._set_expected_body(expected['flavor'], ephemeral='20',
+ swap='10', disabled=False)
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_with_custom_link_prefix(self):
+ self.flags(osapi_compute_link_prefix='http://zoo.com:42',
+ osapi_glance_link_prefix='http://circus.com:34')
+ req = self.fake_request.blank(self._prefix + '/flavors/1')
+ flavor = self.controller.show(req, '1')
+ expected = {
+ "flavor": {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://zoo.com:42/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://zoo.com:42" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ }
+ self._set_expected_body(expected['flavor'], ephemeral='20',
+ swap='10', disabled=False)
+ self.assertEqual(expected, flavor)
+
+ def test_get_flavor_list(self):
+ req = self.fake_request.blank(self._prefix + '/flavors')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_with_marker(self):
+ self.maxDiff = None
+ url = self._prefix + '/flavors?limit=1&marker=1'
+ req = self.fake_request.blank(url)
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ 'flavors_links': [
+ {'href': 'http://localhost/' + self._rspv +
+ '/flavors?limit=1&marker=2',
+ 'rel': 'next'}
+ ]
+ }
+ self.assertThat(flavor, matchers.DictMatches(expected))
+
+ def test_get_flavor_list_with_invalid_marker(self):
+ req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_detail_with_limit(self):
+ url = self._prefix + '/flavors/detail?limit=1'
+ req = self.fake_request.blank(url)
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['1'], 'marker': ['1']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_with_limit(self):
+ req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ }
+ ]
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['2'], 'marker': ['2']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_with_default_limit(self):
+ self.stubs.Set(common, "get_limit_and_marker",
+ fake_get_limit_and_marker)
+ self.flags(osapi_max_limit=1)
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/1",
+ }
+ ]
+ }
+ ]
+
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/v2/fake/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['2'], 'marker': ['1']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_list_detail(self):
+ req = self.fake_request.blank(self._prefix + '/flavors/detail')
+ flavor = self.controller.detail(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self._set_expected_body(expected['flavors'][0], ephemeral='20',
+ swap='10', disabled=False)
+ self._set_expected_body(expected['flavors'][1], ephemeral='10',
+ swap='5', disabled=False)
+ self.assertEqual(expected, flavor)
+
+ def test_get_empty_flavor_list(self):
+ self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
+ empty_get_all_flavors_sorted_list)
+
+ req = self.fake_request.blank(self._prefix + '/flavors')
+ flavors = self.controller.index(req)
+ expected = {'flavors': []}
+ self.assertEqual(flavors, expected)
+
+ def test_get_flavor_list_filter_min_ram(self):
+ # Flavor lists may be filtered by minRam.
+ req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_filter_invalid_min_ram(self):
+ # Ensure you cannot list flavors with invalid minRam param.
+ req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_list_filter_min_disk(self):
+ # Flavor lists may be filtered by minDisk.
+ req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_filter_invalid_min_disk(self):
+ # Ensure you cannot list flavors with invalid minDisk param.
+ req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_list_detail_min_ram_and_min_disk(self):
+ """Tests that filtering work on flavor details and that minRam and
+ minDisk filters can be combined
+ """
+ req = self.fake_request.blank(self._prefix + '/flavors/detail'
+ '?minRam=256&minDisk=20')
+ flavor = self.controller.detail(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self._set_expected_body(expected['flavors'][0], ephemeral='10',
+ swap='5', disabled=False)
+ self.assertEqual(expected, flavor)
+
+
+class FlavorsTestV20(FlavorsTestV21):
+ _prefix = "/v2/fake"
+ Controller = flavors_v2.Controller
+ fake_request = fakes.HTTPRequest
+ _rspv = "v2/fake"
+ _fake = "/fake"
+
+ def _set_expected_body(self, expected, ephemeral, swap, disabled):
+ pass
+
+
+class FlavorsXMLSerializationTest(test.TestCase):
+
+ def test_xml_declaration(self):
+ serializer = flavors_v2.FlavorTemplate()
+
+ fixture = {
+ "flavor": {
+ "id": "12",
+ "name": "asdf",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/12",
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = flavors_v2.FlavorTemplate()
+
+ fixture = {
+ "flavor": {
+ "id": "12",
+ "name": "asdf",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/12",
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'flavor')
+ flavor_dict = fixture['flavor']
+
+ for key in ['name', 'id', 'ram', 'disk']:
+ self.assertEqual(root.get(key), str(flavor_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(flavor_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_handles_integers(self):
+ serializer = flavors_v2.FlavorTemplate()
+
+ fixture = {
+ "flavor": {
+ "id": 12,
+ "name": "asdf",
+ "ram": 256,
+ "disk": 10,
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/12",
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'flavor')
+ flavor_dict = fixture['flavor']
+
+ for key in ['name', 'id', 'ram', 'disk']:
+ self.assertEqual(root.get(key), str(flavor_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(flavor_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_detail(self):
+ serializer = flavors_v2.FlavorsTemplate()
+
+ fixture = {
+ "flavors": [
+ {
+ "id": "23",
+ "name": "flavor 23",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/23",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/23",
+ },
+ ],
+ },
+ {
+ "id": "13",
+ "name": "flavor 13",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/13",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/13",
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'flavors')
+ flavor_elems = root.findall('{0}flavor'.format(NS))
+ self.assertEqual(len(flavor_elems), 2)
+ for i, flavor_elem in enumerate(flavor_elems):
+ flavor_dict = fixture['flavors'][i]
+
+ for key in ['name', 'id', 'ram', 'disk']:
+ self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
+
+ link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(flavor_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index(self):
+ serializer = flavors_v2.MinimalFlavorsTemplate()
+
+ fixture = {
+ "flavors": [
+ {
+ "id": "23",
+ "name": "flavor 23",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/23",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/23",
+ },
+ ],
+ },
+ {
+ "id": "13",
+ "name": "flavor 13",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/13",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/13",
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'flavors')
+ flavor_elems = root.findall('{0}flavor'.format(NS))
+ self.assertEqual(len(flavor_elems), 2)
+ for i, flavor_elem in enumerate(flavor_elems):
+ flavor_dict = fixture['flavors'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
+
+ link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(flavor_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_empty(self):
+ serializer = flavors_v2.MinimalFlavorsTemplate()
+
+ fixture = {
+ "flavors": [],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'flavors')
+ flavor_elems = root.findall('{0}flavor'.format(NS))
+ self.assertEqual(len(flavor_elems), 0)
+
+
+class DisabledFlavorsWithRealDBTestV21(test.TestCase):
+ """Tests that disabled flavors should not be shown nor listed."""
+ Controller = flavors_v3.FlavorsController
+ _prefix = "/v3"
+ fake_request = fakes.HTTPRequestV3
+
+ def setUp(self):
+ super(DisabledFlavorsWithRealDBTestV21, self).setUp()
+
+ # Add a new disabled type to the list of flavors
+ self.req = self.fake_request.blank(self._prefix + '/flavors')
+ self.context = self.req.environ['nova.context']
+ self.admin_context = context.get_admin_context()
+
+ self.disabled_type = self._create_disabled_instance_type()
+ self.inst_types = db.flavor_get_all(
+ self.admin_context)
+ self.controller = self.Controller()
+
+ def tearDown(self):
+ db.flavor_destroy(
+ self.admin_context, self.disabled_type['name'])
+
+ super(DisabledFlavorsWithRealDBTestV21, self).tearDown()
+
+ def _create_disabled_instance_type(self):
+ inst_types = db.flavor_get_all(self.admin_context)
+
+ inst_type = inst_types[0]
+
+ del inst_type['id']
+ inst_type['name'] += '.disabled'
+ inst_type['flavorid'] = unicode(max(
+ [int(flavor['flavorid']) for flavor in inst_types]) + 1)
+ inst_type['disabled'] = True
+
+ disabled_type = db.flavor_create(
+ self.admin_context, inst_type)
+
+ return disabled_type
+
+ def test_index_should_not_list_disabled_flavors_to_user(self):
+ self.context.is_admin = False
+
+ flavor_list = self.controller.index(self.req)['flavors']
+ api_flavorids = set(f['id'] for f in flavor_list)
+
+ db_flavorids = set(i['flavorid'] for i in self.inst_types)
+ disabled_flavorid = str(self.disabled_type['flavorid'])
+
+ self.assertIn(disabled_flavorid, db_flavorids)
+ self.assertEqual(db_flavorids - set([disabled_flavorid]),
+ api_flavorids)
+
+ def test_index_should_list_disabled_flavors_to_admin(self):
+ self.context.is_admin = True
+
+ flavor_list = self.controller.index(self.req)['flavors']
+ api_flavorids = set(f['id'] for f in flavor_list)
+
+ db_flavorids = set(i['flavorid'] for i in self.inst_types)
+ disabled_flavorid = str(self.disabled_type['flavorid'])
+
+ self.assertIn(disabled_flavorid, db_flavorids)
+ self.assertEqual(db_flavorids, api_flavorids)
+
+ def test_show_should_include_disabled_flavor_for_user(self):
+ """Counterintuitively we should show disabled flavors to all users and
+ not just admins. The reason is that, when a user performs a server-show
+ request, we want to be able to display the pretty flavor name ('512 MB
+ Instance') and not just the flavor-id even if the flavor id has been
+ marked disabled.
+ """
+ self.context.is_admin = False
+
+ flavor = self.controller.show(
+ self.req, self.disabled_type['flavorid'])['flavor']
+
+ self.assertEqual(flavor['name'], self.disabled_type['name'])
+
+ def test_show_should_include_disabled_flavor_for_admin(self):
+ self.context.is_admin = True
+
+ flavor = self.controller.show(
+ self.req, self.disabled_type['flavorid'])['flavor']
+
+ self.assertEqual(flavor['name'], self.disabled_type['name'])
+
+
+class DisabledFlavorsWithRealDBTestV20(DisabledFlavorsWithRealDBTestV21):
+ """Tests that disabled flavors should not be shown nor listed."""
+ Controller = flavors_v2.Controller
+ _prefix = "/v2/fake"
+ fake_request = fakes.HTTPRequest
+
+
+class ParseIsPublicTestV21(test.TestCase):
+ Controller = flavors_v3.FlavorsController
+
+ def setUp(self):
+ super(ParseIsPublicTestV21, self).setUp()
+ self.controller = self.Controller()
+
+ def assertPublic(self, expected, is_public):
+ self.assertIs(expected, self.controller._parse_is_public(is_public),
+ '%s did not return %s' % (is_public, expected))
+
+ def test_None(self):
+ self.assertPublic(True, None)
+
+ def test_truthy(self):
+ self.assertPublic(True, True)
+ self.assertPublic(True, 't')
+ self.assertPublic(True, 'true')
+ self.assertPublic(True, 'yes')
+ self.assertPublic(True, '1')
+
+ def test_falsey(self):
+ self.assertPublic(False, False)
+ self.assertPublic(False, 'f')
+ self.assertPublic(False, 'false')
+ self.assertPublic(False, 'no')
+ self.assertPublic(False, '0')
+
+ def test_string_none(self):
+ self.assertPublic(None, 'none')
+ self.assertPublic(None, 'None')
+
+ def test_other(self):
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
+
+
+class ParseIsPublicTestV20(ParseIsPublicTestV21):
+ Controller = flavors_v2.Controller
diff --git a/nova/tests/unit/api/openstack/compute/test_image_metadata.py b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
new file mode 100644
index 0000000000..6de8ddf6f6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
@@ -0,0 +1,366 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import image_metadata
+from nova.api.openstack.compute.plugins.v3 import image_metadata \
+ as image_metadata_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import image_fixtures
+
+IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
+CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
+
+
+def get_image_123():
+ return copy.deepcopy(IMAGE_FIXTURES)[0]
+
+
+class ImageMetaDataTestV21(test.NoDBTestCase):
+ controller_class = image_metadata_v21.ImageMetadataController
+
+ def setUp(self):
+ super(ImageMetaDataTestV21, self).setUp()
+ self.controller = self.controller_class()
+
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_index(self, get_all_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
+ res_dict = self.controller.index(req, '123')
+ expected = {'metadata': {'key1': 'value1'}}
+ self.assertEqual(res_dict, expected)
+ get_all_mocked.assert_called_once_with(mock.ANY, '123')
+
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_show(self, get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ res_dict = self.controller.show(req, '123', 'key1')
+ self.assertIn('meta', res_dict)
+ self.assertEqual(len(res_dict['meta']), 1)
+ self.assertEqual('value1', res_dict['meta']['key1'])
+ get_mocked.assert_called_once_with(mock.ANY, '123')
+
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_show_not_found(self, _get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, '123', 'key9')
+
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotFound(image_id='100'))
+ def test_show_image_not_found(self, _get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, '100', 'key9')
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_create(self, get_mocked, update_mocked, quota_mocked):
+ mock_result = copy.deepcopy(get_image_123())
+ mock_result['properties']['key7'] = 'value7'
+ update_mocked.return_value = mock_result
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
+ req.method = 'POST'
+ body = {"metadata": {"key7": "value7"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, '123', body)
+ get_mocked.assert_called_once_with(mock.ANY, '123')
+ expected = copy.deepcopy(get_image_123())
+ expected['properties'] = {
+ 'key1': 'value1', # existing meta
+ 'key7': 'value7' # new meta
+ }
+ quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
+ update_mocked.assert_called_once_with(mock.ANY, '123', expected,
+ data=None, purge_props=True)
+
+ expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
+ self.assertEqual(expected_output, res)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotFound(image_id='100'))
+ def test_create_image_not_found(self, _get_mocked, update_mocked,
+ quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
+ req.method = 'POST'
+ body = {"metadata": {"key7": "value7"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req, '100', body)
+ self.assertFalse(quota_mocked.called)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_update_all(self, get_mocked, update_mocked, quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
+ req.method = 'PUT'
+ body = {"metadata": {"key9": "value9"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.update_all(req, '123', body)
+ get_mocked.assert_called_once_with(mock.ANY, '123')
+ expected = copy.deepcopy(get_image_123())
+ expected['properties'] = {
+ 'key9': 'value9' # replace meta
+ }
+ quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
+ update_mocked.assert_called_once_with(mock.ANY, '123', expected,
+ data=None, purge_props=True)
+
+ expected_output = {'metadata': {'key9': 'value9'}}
+ self.assertEqual(expected_output, res)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotFound(image_id='100'))
+ def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
+ req.method = 'PUT'
+ body = {"metadata": {"key9": "value9"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update_all, req, '100', body)
+ self.assertFalse(quota_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "zz"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.update(req, '123', 'key1', body)
+ expected = copy.deepcopy(get_image_123())
+ expected['properties'] = {
+ 'key1': 'zz' # changed meta
+ }
+ quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
+ update_mocked.assert_called_once_with(mock.ANY, '123', expected,
+ data=None, purge_props=True)
+
+ expected_output = {'meta': {'key1': 'zz'}}
+ self.assertEqual(res, expected_output)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotFound(image_id='100'))
+ def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "zz"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update, req, '100', 'key1', body)
+ self.assertFalse(quota_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get')
+ def test_update_item_bad_body(self, get_mocked, update_mocked,
+ quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'PUT'
+ body = {"key1": "zz"}
+ req.body = ''
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, '123', 'key1', body)
+ self.assertFalse(get_mocked.called)
+ self.assertFalse(quota_mocked.called)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR,
+ side_effect=webob.exc.HTTPRequestEntityTooLarge(
+ explanation='', headers={'Retry-After': 0}))
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get')
+ def test_update_item_too_many_keys(self, get_mocked, update_mocked,
+ _quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'PUT'
+ body = {"metadata": {"foo": "bar"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, '123', 'key1', body)
+ self.assertFalse(get_mocked.called)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR)
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
+ quota_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, '123', 'bad', body)
+ self.assertFalse(quota_mocked.called)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_delete(self, _get_mocked, update_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'DELETE'
+ res = self.controller.delete(req, '123', 'key1')
+ expected = copy.deepcopy(get_image_123())
+ expected['properties'] = {}
+ update_mocked.assert_called_once_with(mock.ANY, '123', expected,
+ data=None, purge_props=True)
+
+ self.assertIsNone(res)
+
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_delete_not_found(self, _get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
+ req.method = 'DELETE'
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, '123', 'blah')
+
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotFound(image_id='100'))
+ def test_delete_image_not_found(self, _get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
+ req.method = 'DELETE'
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, '100', 'key1')
+
+ @mock.patch(CHK_QUOTA_STR,
+ side_effect=webob.exc.HTTPForbidden(
+ explanation='', headers={'Retry-After': 0}))
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_too_many_metadata_items_on_create(self, _get_mocked,
+ update_mocked, _quota_mocked):
+ body = {"metadata": {"foo": "bar"}}
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, req, '123', body)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch(CHK_QUOTA_STR,
+ side_effect=webob.exc.HTTPForbidden(
+ explanation='', headers={'Retry-After': 0}))
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_too_many_metadata_items_on_put(self, _get_mocked,
+ update_mocked, _quota_mocked):
+ body = {"metadata": {"foo": "bar"}}
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
+ req.method = 'PUT'
+ body = {"meta": {"blah": "blah"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.update, req, '123', 'blah', body)
+ self.assertFalse(update_mocked.called)
+
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotAuthorized(image_id='123'))
+ def test_image_not_authorized_update(self, _get_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.update, req, '123', 'key1', body)
+
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotAuthorized(image_id='123'))
+ def test_image_not_authorized_update_all(self, _get_mocked):
+ image_id = 131
+ # see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
+
+ req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
+ % image_id)
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.update_all, req, image_id, body)
+
+ @mock.patch('nova.image.api.API.get',
+ side_effect=exception.ImageNotAuthorized(image_id='123'))
+ def test_image_not_authorized_create(self, _get_mocked):
+ image_id = 131
+ # see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
+
+ req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
+ % image_id)
+ req.method = 'POST'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, req, image_id, body)
+
+
+class ImageMetaDataTestV2(ImageMetaDataTestV21):
+ controller_class = image_metadata.Controller
+
+    # NOTE(cyeoh): This duplicate unittest is necessary because of a race
+    # condition with the V21 unittests. It's a mock issue.
+ @mock.patch('nova.image.api.API.update')
+ @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+ def test_delete(self, _get_mocked, update_mocked):
+ req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
+ req.method = 'DELETE'
+ res = self.controller.delete(req, '123', 'key1')
+ expected = copy.deepcopy(get_image_123())
+ expected['properties'] = {}
+ update_mocked.assert_called_once_with(mock.ANY, '123', expected,
+ data=None, purge_props=True)
+
+ self.assertIsNone(res)
diff --git a/nova/tests/unit/api/openstack/compute/test_images.py b/nova/tests/unit/api/openstack/compute/test_images.py
new file mode 100644
index 0000000000..ad55f9a86e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_images.py
@@ -0,0 +1,1046 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests of the new image services, both as a service layer,
+and as a WSGI layer
+"""
+
+import copy
+
+from lxml import etree
+import mock
+import webob
+
+from nova.api.openstack.compute import images
+from nova.api.openstack.compute.plugins.v3 import images as images_v21
+from nova.api.openstack.compute.views import images as images_view
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova.image import glance
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import image_fixtures
+from nova.tests.unit import matchers
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
+
+
+class ImagesControllerTestV21(test.NoDBTestCase):
+ """Test of the OpenStack API /images application controller w/Glance.
+ """
+ image_controller_class = images_v21.ImagesController
+ url_base = '/v3'
+ bookmark_base = ''
+ http_request = fakes.HTTPRequestV3
+
+ def setUp(self):
+ """Run before each test."""
+ super(ImagesControllerTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ fakes.stub_out_compute_api_backup(self.stubs)
+
+ self.controller = self.image_controller_class()
+ self.url_prefix = "http://localhost%s/images" % self.url_base
+ self.bookmark_prefix = "http://localhost%s/images" % self.bookmark_base
+ self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
+ self.server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
+ self.server_href = (
+ "http://localhost%s/servers/%s" % (self.url_base,
+ self.server_uuid))
+ self.server_bookmark = (
+ "http://localhost%s/servers/%s" % (self.bookmark_base,
+ self.server_uuid))
+ self.alternate = "%s/images/%s"
+
+ self.expected_image_123 = {
+ "image": {'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'minDisk': 10,
+ 'progress': 100,
+ 'minRam': 128,
+ "links": [{
+ "rel": "self",
+ "href": "%s/123" % self.url_prefix
+ },
+ {
+ "rel": "bookmark",
+ "href":
+ "%s/123" % self.bookmark_prefix
+ },
+ {
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image",
+ "href": self.alternate %
+ (glance.generate_glance_url(),
+ 123),
+ }],
+ },
+ }
+
+ self.expected_image_124 = {
+ "image": {'id': '124',
+ 'name': 'queued snapshot',
+ 'metadata': {
+ u'instance_uuid': self.server_uuid,
+ u'user_id': u'fake',
+ },
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 25,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'server': {
+ 'id': self.server_uuid,
+ "links": [{
+ "rel": "self",
+ "href": self.server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": self.server_bookmark,
+ }],
+ },
+ "links": [{
+ "rel": "self",
+ "href": "%s/124" % self.url_prefix
+ },
+ {
+ "rel": "bookmark",
+ "href":
+ "%s/124" % self.bookmark_prefix
+ },
+ {
+ "rel": "alternate",
+ "type":
+ "application/vnd.openstack.image",
+ "href": self.alternate %
+ (glance.generate_glance_url(),
+ 124),
+ }],
+ },
+ }
+
+ @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[0])
+ def test_get_image(self, get_mocked):
+ request = self.http_request.blank(self.url_base + 'images/123')
+ actual_image = self.controller.show(request, '123')
+ self.assertThat(actual_image,
+ matchers.DictMatches(self.expected_image_123))
+ get_mocked.assert_called_once_with(mock.ANY, '123')
+
+ @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[1])
+ def test_get_image_with_custom_prefix(self, _get_mocked):
+ self.flags(osapi_compute_link_prefix='https://zoo.com:42',
+ osapi_glance_link_prefix='http://circus.com:34')
+ fake_req = self.http_request.blank(self.url_base + 'images/124')
+ actual_image = self.controller.show(fake_req, '124')
+
+ expected_image = self.expected_image_124
+ expected_image["image"]["links"][0]["href"] = (
+ "https://zoo.com:42%s/images/124" % self.url_base)
+ expected_image["image"]["links"][1]["href"] = (
+ "https://zoo.com:42%s/images/124" % self.bookmark_base)
+ expected_image["image"]["links"][2]["href"] = (
+ "http://circus.com:34/images/124")
+ expected_image["image"]["server"]["links"][0]["href"] = (
+ "https://zoo.com:42%s/servers/%s" % (self.url_base,
+ self.server_uuid))
+ expected_image["image"]["server"]["links"][1]["href"] = (
+ "https://zoo.com:42%s/servers/%s" % (self.bookmark_base,
+ self.server_uuid))
+
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
+
+ @mock.patch('nova.image.api.API.get', side_effect=exception.NotFound)
+ def test_get_image_404(self, _get_mocked):
+ fake_req = self.http_request.blank(self.url_base + 'images/unknown')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, fake_req, 'unknown')
+
+ @mock.patch('nova.image.api.API.get_all', return_value=IMAGE_FIXTURES)
+ def test_get_image_details(self, get_all_mocked):
+ request = self.http_request.blank(self.url_base + 'images/detail')
+ response = self.controller.detail(request)
+
+ get_all_mocked.assert_called_once_with(mock.ANY, filters={})
+ response_list = response["images"]
+
+ image_125 = copy.deepcopy(self.expected_image_124["image"])
+ image_125['id'] = '125'
+ image_125['name'] = 'saving snapshot'
+ image_125['progress'] = 50
+ image_125["links"][0]["href"] = "%s/125" % self.url_prefix
+ image_125["links"][1]["href"] = "%s/125" % self.bookmark_prefix
+ image_125["links"][2]["href"] = (
+ "%s/images/125" % glance.generate_glance_url())
+
+ image_126 = copy.deepcopy(self.expected_image_124["image"])
+ image_126['id'] = '126'
+ image_126['name'] = 'active snapshot'
+ image_126['status'] = 'ACTIVE'
+ image_126['progress'] = 100
+ image_126["links"][0]["href"] = "%s/126" % self.url_prefix
+ image_126["links"][1]["href"] = "%s/126" % self.bookmark_prefix
+ image_126["links"][2]["href"] = (
+ "%s/images/126" % glance.generate_glance_url())
+
+ image_127 = copy.deepcopy(self.expected_image_124["image"])
+ image_127['id'] = '127'
+ image_127['name'] = 'killed snapshot'
+ image_127['status'] = 'ERROR'
+ image_127['progress'] = 0
+ image_127["links"][0]["href"] = "%s/127" % self.url_prefix
+ image_127["links"][1]["href"] = "%s/127" % self.bookmark_prefix
+ image_127["links"][2]["href"] = (
+ "%s/images/127" % glance.generate_glance_url())
+
+ image_128 = copy.deepcopy(self.expected_image_124["image"])
+ image_128['id'] = '128'
+ image_128['name'] = 'deleted snapshot'
+ image_128['status'] = 'DELETED'
+ image_128['progress'] = 0
+ image_128["links"][0]["href"] = "%s/128" % self.url_prefix
+ image_128["links"][1]["href"] = "%s/128" % self.bookmark_prefix
+ image_128["links"][2]["href"] = (
+ "%s/images/128" % glance.generate_glance_url())
+
+ image_129 = copy.deepcopy(self.expected_image_124["image"])
+ image_129['id'] = '129'
+ image_129['name'] = 'pending_delete snapshot'
+ image_129['status'] = 'DELETED'
+ image_129['progress'] = 0
+ image_129["links"][0]["href"] = "%s/129" % self.url_prefix
+ image_129["links"][1]["href"] = "%s/129" % self.bookmark_prefix
+ image_129["links"][2]["href"] = (
+ "%s/images/129" % glance.generate_glance_url())
+
+ image_130 = copy.deepcopy(self.expected_image_123["image"])
+ image_130['id'] = '130'
+ image_130['name'] = None
+ image_130['metadata'] = {}
+ image_130['minDisk'] = 0
+ image_130['minRam'] = 0
+ image_130["links"][0]["href"] = "%s/130" % self.url_prefix
+ image_130["links"][1]["href"] = "%s/130" % self.bookmark_prefix
+ image_130["links"][2]["href"] = (
+ "%s/images/130" % glance.generate_glance_url())
+
+ image_131 = copy.deepcopy(self.expected_image_123["image"])
+ image_131['id'] = '131'
+ image_131['name'] = None
+ image_131['metadata'] = {}
+ image_131['minDisk'] = 0
+ image_131['minRam'] = 0
+ image_131["links"][0]["href"] = "%s/131" % self.url_prefix
+ image_131["links"][1]["href"] = "%s/131" % self.bookmark_prefix
+ image_131["links"][2]["href"] = (
+ "%s/images/131" % glance.generate_glance_url())
+
+ expected = [self.expected_image_123["image"],
+ self.expected_image_124["image"],
+ image_125, image_126, image_127,
+ image_128, image_129, image_130,
+ image_131]
+
+ self.assertThat(expected, matchers.DictListMatches(response_list))
+
+ @mock.patch('nova.image.api.API.get_all')
+ def test_get_image_details_with_limit(self, get_all_mocked):
+ request = self.http_request.blank(self.url_base +
+ 'images/detail?limit=2')
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={})
+
+ @mock.patch('nova.image.api.API.get_all')
+ def test_get_image_details_with_limit_and_page_size(self, get_all_mocked):
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?limit=2&page_size=1')
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={},
+ page_size=1)
+
+ @mock.patch('nova.image.api.API.get_all')
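+    # Shared helper: callers pass only (filters, request); the mock.patch
+    # decorator supplies get_all_mocked when the helper actually runs.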
+ def _detail_request(self, filters, request, get_all_mocked):
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, filters=filters)
+
+ def test_image_detail_filter_with_name(self):
+ filters = {'name': 'testname'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?name=testname')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_status(self):
+ filters = {'status': 'active'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?status=ACTIVE')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_property(self):
+ filters = {'property-test': '3'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?property-test=3')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_server_href(self):
+ filters = {'property-instance_uuid': self.uuid}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?server=' + self.uuid)
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_server_uuid(self):
+ filters = {'property-instance_uuid': self.uuid}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?server=' + self.uuid)
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_changes_since(self):
+ filters = {'changes-since': '2011-01-24T17:08Z'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?changes-since=2011-01-24T17:08Z')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_type(self):
+ filters = {'property-image_type': 'BASE'}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?type=BASE')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_not_supported(self):
+ filters = {'status': 'active'}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?status='
+ 'ACTIVE&UNSUPPORTEDFILTER=testname')
+ self._detail_request(filters, request)
+
+ def test_image_detail_no_filters(self):
+ filters = {}
+ request = self.http_request.blank(self.url_base + 'images/detail')
+ self._detail_request(filters, request)
+
+ @mock.patch('nova.image.api.API.get_all', side_effect=exception.Invalid)
+ def test_image_detail_invalid_marker(self, _get_all_mocked):
+ request = self.http_request.blank(self.url_base + '?marker=invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
+ request)
+
+ def test_generate_alternate_link(self):
+ view = images_view.ViewBuilder()
+ request = self.http_request.blank(self.url_base + 'images/1')
+ generated_url = view._get_alternate_link(request, 1)
+ actual_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(generated_url, actual_url)
+
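+    # The v2.1 controller records the expected success code as wsgi_code on
+    # the bound method; ImagesControllerTestV2 below overrides this to check
+    # the webob response's status_int instead.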
+ def _check_response(self, controller_method, response, expected_code):
+ self.assertEqual(expected_code, controller_method.wsgi_code)
+
+ @mock.patch('nova.image.api.API.delete')
+ def test_delete_image(self, delete_mocked):
+ request = self.http_request.blank(self.url_base + 'images/124')
+ request.method = 'DELETE'
+ response = self.controller.delete(request, '124')
+ self._check_response(self.controller.delete, response, 204)
+ delete_mocked.assert_called_once_with(mock.ANY, '124')
+
+ @mock.patch('nova.image.api.API.delete',
+ side_effect=exception.ImageNotAuthorized(image_id='123'))
+ def test_delete_deleted_image(self, _delete_mocked):
+ # If you try to delete a deleted image, you get back 403 Forbidden.
+ request = self.http_request.blank(self.url_base + 'images/123')
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
+ request, '123')
+
+ @mock.patch('nova.image.api.API.delete',
+ side_effect=exception.ImageNotFound(image_id='123'))
+ def test_delete_image_not_found(self, _delete_mocked):
+ request = self.http_request.blank(self.url_base + 'images/300')
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, '300')
+
+
+class ImagesControllerTestV2(ImagesControllerTestV21):
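+    # Re-runs every ImagesControllerTestV21 test against the legacy v2
+    # controller, swapping in the v2 URL layout and request factory.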
+ image_controller_class = images.Controller
+ url_base = '/v2/fake'
+ bookmark_base = '/fake'
+ http_request = fakes.HTTPRequest
+
+ def _check_response(self, controller_method, response, expected_code):
+ self.assertEqual(expected_code, response.status_int)
+
+
+class ImageXMLSerializationTest(test.NoDBTestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
+ SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
+ SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
+ IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
+ IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
+ IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
+
+ def test_xml_declaration(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 10,
+ 'minDisk': 100,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_zero_metadata(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {},
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_image_no_metadata_key(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_no_server(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertIsNone(server_root)
+
+ def test_show_with_min_ram(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 256,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minRam']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_with_min_disk(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minDisk': 5,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minDisk']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_with_links(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ],
+ 'images_links': [
+ {
+ 'rel': 'next',
+ 'href': self.IMAGE_NEXT % (2, 2),
+ }
+ ],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ # Check images_links
+ images_links = root.findall('{0}link'.format(ATOMNS))
+ for i, link in enumerate(fixture['images_links']):
+ for key, value in link.items():
+ self.assertEqual(images_links[i].get(key), value)
+
+ def test_index_zero_images(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixtures = {
+ 'images': [],
+ }
+
+ output = serializer.serialize(fixtures)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 0)
+
+ def test_detail(self):
+ serializer = images.ImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': '2',
+ 'name': 'Image2',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'SAVING',
+ 'progress': 80,
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
new file mode 100644
index 0000000000..47da849b28
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -0,0 +1,1016 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests dealing with HTTP rate-limiting.
+"""
+
+import httplib
+import StringIO
+from xml.dom import minidom
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+import six
+import webob
+
+from nova.api.openstack.compute import limits
+from nova.api.openstack.compute.plugins.v3 import limits as limits_v3
+from nova.api.openstack.compute import views
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+import nova.context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+from nova import utils
+
+
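+# Each Limit is (verb, human-readable URI, matching regex, max requests,
+# time unit); e.g. the "/servers" POST entry allows at most 3 POSTs to
+# /servers per minute.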
+TEST_LIMITS = [
+ limits.Limit("GET", "/delayed", "^/delayed", 1,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "/servers", "^/servers", 3,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "/servers", "^/servers", 5,
+ utils.TIME_UNITS['MINUTE']),
+]
+NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/common/api/v1.0'
+}
+
+
+class BaseLimitTestSuite(test.NoDBTestCase):
+ """Base test suite which provides relevant stubs and time abstraction."""
+
+ def setUp(self):
+ super(BaseLimitTestSuite, self).setUp()
+ self.time = 0.0
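+        # The limiter reads the clock through Limit._get_time, stubbed below
+        # to this fake clock; tests advance self.time instead of sleeping.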
+ self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+ self.absolute_limits = {}
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return dict((k, dict(limit=v))
+ for k, v in self.absolute_limits.items())
+
+ self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+
+ def _get_time(self):
+ """Return the "time" according to this test suite."""
+ return self.time
+
+
+class LimitsControllerTestV21(BaseLimitTestSuite):
+ """Tests for `limits.LimitsController` class."""
+ limits_controller = limits_v3.LimitsController
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimitsControllerTestV21, self).setUp()
+ self.controller = wsgi.Resource(self.limits_controller())
+ self.ctrler = self.limits_controller()
+
+ def _get_index_request(self, accept_header="application/json",
+ tenant_id=None):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ if tenant_id:
+ request = webob.Request.blank("/?tenant_id=%s" % tenant_id)
+
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ context = nova.context.RequestContext('testuser', 'testproject')
+ request.environ["nova.context"] = context
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "changes-since*", "changes-since",
+ 5, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ # Test getting empty limit details in JSON.
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ self._test_index_json()
+
+ def test_index_json_by_tenant(self):
+ self._test_index_json('faketenant')
+
+ def _test_index_json(self, tenant_id=None):
+ # Test getting limit details in JSON.
+ request = self._get_index_request(tenant_id=tenant_id)
+ context = request.environ["nova.context"]
+ if tenant_id is None:
+ tenant_id = context.project_id
+
+ request = self._populate_limits(request)
+ self.absolute_limits = {
+ 'ram': 512,
+ 'instances': 5,
+ 'cores': 21,
+ 'key_pairs': 10,
+ 'floating_ips': 10,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ }
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ {
+ "verb": "POST",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "HOUR",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+ {
+ "regex": "changes-since",
+ "uri": "changes-since*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "maxTotalFloatingIps": 10,
+ "maxSecurityGroups": 10,
+ "maxSecurityGroupRules": 20,
+ },
+ },
+ }
+
+ def _get_project_quotas(context, project_id, usages=True):
+ return dict((k, dict(limit=v))
+ for k, v in self.absolute_limits.items())
+
+ with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
+ get_project_quotas:
+ get_project_quotas.side_effect = _get_project_quotas
+
+ response = request.get_response(self.controller)
+
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ get_project_quotas.assert_called_once_with(context, tenant_id,
+ usages=False)
+
+
+class LimitsControllerTestV2(LimitsControllerTestV21):
+ limits_controller = limits.LimitsController
+
+ def _populate_limits_diff_regex(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("GET", "*", "*.*", 10, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_index_diff_regex(self):
+ # Test getting limit details in JSON.
+ request = self._get_index_request()
+ request = self._populate_limits_diff_regex(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+ {
+ "regex": "*.*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def _test_index_absolute_limits_json(self, expected):
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body['limits']['absolute'])
+
+ def test_index_ignores_extra_absolute_limits_json(self):
+ self.absolute_limits = {'unknown_limit': 9001}
+ self._test_index_absolute_limits_json({})
+
+ def test_index_absolute_ram_json(self):
+ self.absolute_limits = {'ram': 1024}
+ self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024})
+
+ def test_index_absolute_cores_json(self):
+ self.absolute_limits = {'cores': 17}
+ self._test_index_absolute_limits_json({'maxTotalCores': 17})
+
+ def test_index_absolute_instances_json(self):
+ self.absolute_limits = {'instances': 19}
+ self._test_index_absolute_limits_json({'maxTotalInstances': 19})
+
+ def test_index_absolute_metadata_json(self):
+ # NOTE: both server metadata and image metadata are overloaded
+ # into metadata_items
+ self.absolute_limits = {'metadata_items': 23}
+ expected = {
+ 'maxServerMeta': 23,
+ 'maxImageMeta': 23,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_index_absolute_injected_files(self):
+ self.absolute_limits = {
+ 'injected_files': 17,
+ 'injected_file_content_bytes': 86753,
+ }
+ expected = {
+ 'maxPersonality': 17,
+ 'maxPersonalitySize': 86753,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_index_absolute_security_groups(self):
+ self.absolute_limits = {
+ 'security_groups': 8,
+ 'security_group_rules': 16,
+ }
+ expected = {
+ 'maxSecurityGroups': 8,
+ 'maxSecurityGroupRules': 16,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_limit_create(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.create,
+ req, {})
+
+ def test_limit_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.delete,
+ req, 1)
+
+ def test_limit_detail(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.detail,
+ req)
+
+ def test_limit_show(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.show,
+ req, 1)
+
+ def test_limit_update(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.update,
+ req, 1, {})
+
+
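+# Referenced by name in LimitMiddlewareTest's configuration string below, so
+# the tests can verify that RateLimitingMiddleware instantiates the limiter
+# class it is told to use.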
+class MockLimiter(limits.Limiter):
+ pass
+
+
+class LimitMiddlewareTest(BaseLimitTestSuite):
+ """Tests for the `limits.RateLimitingMiddleware` class."""
+
+ @webob.dec.wsgify
+ def _empty_app(self, request):
+ """Do-nothing WSGI app."""
+ pass
+
+ def setUp(self):
+ """Prepare middleware for use through fake WSGI app."""
+ super(LimitMiddlewareTest, self).setUp()
+ _limits = '(GET, *, .*, 1, MINUTE)'
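+        # A single rule in the "(verb, uri, regex, value, unit)" form consumed
+        # by Limiter.parse_limits: at most 1 GET per minute to any URI.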
+ self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
+ "%s.MockLimiter" %
+ self.__class__.__module__)
+
+ def test_limit_class(self):
+ # Test that middleware selected correct limiter class.
+ self.assertIsInstance(self.app._limiter, MockLimiter)
+
+ def test_good_request(self):
+ # Test successful GET request through middleware.
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ def test_limited_request_json(self):
+ # Test a rate-limited (429) GET request through middleware.
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 429)
+
+ self.assertIn('Retry-After', response.headers)
+ retry_after = int(response.headers['Retry-After'])
+ self.assertAlmostEqual(retry_after, 60, 1)
+
+ body = jsonutils.loads(response.body)
+ expected = "Only 1 GET request(s) can be made to * every minute."
+ value = body["overLimit"]["details"].strip()
+ self.assertEqual(value, expected)
+
+ self.assertIn("retryAfter", body["overLimit"])
+ retryAfter = body["overLimit"]["retryAfter"]
+ self.assertEqual(retryAfter, "60")
+
+ def test_limited_request_xml(self):
+ # Test a rate-limited (429) response as XML.
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ request.accept = "application/xml"
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 429)
+
+ root = minidom.parseString(response.body).childNodes[0]
+ expected = "Only 1 GET request(s) can be made to * every minute."
+
+ self.assertIsNotNone(root.attributes.getNamedItem("retryAfter"))
+ retryAfter = root.attributes.getNamedItem("retryAfter").value
+ self.assertEqual(retryAfter, "60")
+
+ details = root.getElementsByTagName("details")
+ self.assertEqual(details.length, 1)
+
+ value = details.item(0).firstChild.data.strip()
+ self.assertEqual(value, expected)
+
+
+class LimitTest(BaseLimitTestSuite):
+ """Tests for the `limits.Limit` class."""
+
+ def test_GET_no_delay(self):
+ # Test a limit handles 1 GET per second.
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertIsNone(delay)
+ self.assertEqual(0, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ def test_GET_delay(self):
+ # Test two calls to 1 GET per second limit.
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertIsNone(delay)
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(1, delay)
+ self.assertEqual(1, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ self.time += 4
+
+ delay = limit("GET", "/anything")
+ self.assertIsNone(delay)
+ self.assertEqual(4, limit.next_request)
+ self.assertEqual(4, limit.last_request)
+
+
+class ParseLimitsTest(BaseLimitTestSuite):
+ """Tests for the default limits parser in the in-memory
+ `limits.Limiter` class.
+ """
+
+ def test_invalid(self):
+ # Test that parse_limits() handles invalid input correctly.
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ ';;;;;')
+
+ def test_bad_rule(self):
+ # Test that parse_limits() handles bad rules correctly.
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ 'GET, *, .*, 20, minute')
+
+ def test_missing_arg(self):
+ # Test that parse_limits() handles missing args correctly.
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, 20)')
+
+ def test_bad_value(self):
+ # Test that parse_limits() handles bad values correctly.
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, foo, minute)')
+
+ def test_bad_unit(self):
+ # Test that parse_limits() handles bad units correctly.
+ self.assertRaises(ValueError, limits.Limiter.parse_limits,
+ '(GET, *, .*, 20, lightyears)')
+
+ def test_multiple_rules(self):
+ # Test that parse_limits() handles multiple rules correctly.
+ try:
+ l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
+ '(PUT, /foo*, /foo.*, 10, hour);'
+ '(POST, /bar*, /bar.*, 5, second);'
+ '(Say, /derp*, /derp.*, 1, day)')
+ except ValueError as e:
+ assert False, six.text_type(e)
+
+ # Make sure the number of returned limits are correct
+ self.assertEqual(len(l), 4)
+
+ # Check all the verbs...
+ expected = ['GET', 'PUT', 'POST', 'SAY']
+ self.assertEqual([t.verb for t in l], expected)
+
+ # ...the URIs...
+ expected = ['*', '/foo*', '/bar*', '/derp*']
+ self.assertEqual([t.uri for t in l], expected)
+
+ # ...the regexes...
+ expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
+ self.assertEqual([t.regex for t in l], expected)
+
+ # ...the values...
+ expected = [20, 10, 5, 1]
+ self.assertEqual([t.value for t in l], expected)
+
+ # ...and the units...
+ expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
+ utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
+ self.assertEqual([t.unit for t in l], expected)
+
+
+class LimiterTest(BaseLimitTestSuite):
+ """Tests for the in-memory `limits.Limiter` class."""
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimiterTest, self).setUp()
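+        # Per-user overrides are passed as 'limits.<username>' keyword
+        # arguments: the empty rule string leaves user3 with no rate limits,
+        # while user0 gets stricter GET/PUT rules than the defaults.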
+ userlimits = {'limits.user3': '',
+ 'limits.user0': '(get, *, .*, 4, minute);'
+ '(put, *, .*, 2, minute)'}
+ self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
+
+ def _check(self, num, verb, url, username=None):
+ """Check and yield results from checks."""
+ for x in xrange(num):
+ yield self.limiter.check_for_delay(verb, url, username)[0]
+
+ def _check_sum(self, num, verb, url, username=None):
+ """Check and sum results from checks."""
+ results = self._check(num, verb, url, username)
+ return sum(item for item in results if item)
+
+ def test_no_delay_GET(self):
+ """Simple test to ensure no delay on a single call for a limit verb we
+ didn"t set.
+ """
+ delay = self.limiter.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_no_delay_PUT(self):
+ # Simple test to ensure no delay on a single call for a known limit.
+ delay = self.limiter.check_for_delay("PUT", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_delay_PUT(self):
+ """Ensure the 11th PUT will result in a delay of 6.0 seconds until
+ the next request will be granced.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_POST(self):
+ """Ensure the 8th POST will result in a delay of 6.0 seconds until
+ the next request will be granced.
+ """
+ expected = [None] * 7
+ results = list(self._check(7, "POST", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = 60.0 / 7.0
+ results = self._check_sum(1, "POST", "/anything")
+ self.assertAlmostEqual(expected, results, 8)
+
+ def test_delay_GET(self):
+ # Ensure the 11th GET will result in NO delay.
+ expected = [None] * 11
+ results = list(self._check(11, "GET", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = [None] * 4 + [15.0]
+ results = list(self._check(5, "GET", "/foo", "user0"))
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_servers(self):
+ """Ensure PUT on /servers limits at 5 requests, and PUT elsewhere is
+ still OK after 5 requests...but then after 11 total requests, PUT
+ limiting kicks in.
+ """
+ # First 6 requests on PUT /servers
+ expected = [None] * 5 + [12.0]
+ results = list(self._check(6, "PUT", "/servers"))
+ self.assertEqual(expected, results)
+
+ # Next 5 request on PUT /anything
+ expected = [None] * 4 + [6.0]
+ results = list(self._check(5, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_wait(self):
+ """Ensure after hitting the limit and then waiting for the correct
+ amount of time, the limit will be lifted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ # Advance time
+ self.time += 6.0
+
+ expected = [None, 6.0]
+ results = list(self._check(2, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_delays(self):
+ # Ensure multiple requests still get a delay.
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = [None] * 2 + [30.0] * 8
+ results = list(self._check(10, "PUT", "/anything", "user0"))
+ self.assertEqual(expected, results)
+
+ def test_user_limit(self):
+ # Test user-specific limits.
+ self.assertEqual(self.limiter.levels['user3'], [])
+ self.assertEqual(len(self.limiter.levels['user0']), 2)
+
+ def test_multiple_users(self):
+ # Tests involving multiple users.
+ # User0
+ expected = [None] * 2 + [30.0] * 8
+ results = list(self._check(10, "PUT", "/anything", "user0"))
+ self.assertEqual(expected, results)
+
+ # User1
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ # User2
+ expected = [None] * 10 + [6.0] * 5
+ results = list(self._check(15, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+ # User3
+ expected = [None] * 20
+ results = list(self._check(20, "PUT", "/anything", "user3"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User1 again
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+        # User2 again
+ expected = [4.0] * 5
+ results = list(self._check(5, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+ # User0 again
+ expected = [28.0]
+ results = list(self._check(1, "PUT", "/anything", "user0"))
+ self.assertEqual(expected, results)
+
+ self.time += 28.0
+
+ expected = [None, 30.0]
+ results = list(self._check(2, "PUT", "/anything", "user0"))
+ self.assertEqual(expected, results)
+
+
+class WsgiLimiterTest(BaseLimitTestSuite):
+ """Tests for `limits.WsgiLimiter` class."""
+
+ def setUp(self):
+ """Run before each test."""
+ super(WsgiLimiterTest, self).setUp()
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+
+ def _request_data(self, verb, path):
+ """Get data describing a limit request verb/path."""
+ return jsonutils.dumps({"verb": verb, "path": path})
+
+ def _request(self, verb, url, username=None):
+ """Make sure that POSTing to the given url causes the given username
+ to perform the given action. Make the internal rate limiter return
+ delay and make sure that the WSGI app returns the correct response.
+ """
+ if username:
+ request = webob.Request.blank("/%s" % username)
+ else:
+ request = webob.Request.blank("/")
+
+ request.method = "POST"
+ request.body = self._request_data(verb, url)
+ response = request.get_response(self.app)
+
+ if "X-Wait-Seconds" in response.headers:
+ self.assertEqual(response.status_int, 403)
+ return response.headers["X-Wait-Seconds"]
+
+ self.assertEqual(response.status_int, 204)
+
+ def test_invalid_methods(self):
+ # Only POSTs should work.
+ for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
+ request = webob.Request.blank("/", method=method)
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 405)
+
+ def test_good_url(self):
+ delay = self._request("GET", "/something")
+ self.assertIsNone(delay)
+
+ def test_escaping(self):
+ delay = self._request("GET", "/something/jump%20up")
+ self.assertIsNone(delay)
+
+ def test_response_to_delays(self):
+ delay = self._request("GET", "/delayed")
+ self.assertIsNone(delay)
+
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, '60.00')
+
+ def test_response_to_delays_usernames(self):
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertIsNone(delay)
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertIsNone(delay)
+
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, '60.00')
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, '60.00')
+
+
+class FakeHttplibSocket(object):
+ """Fake `httplib.HTTPResponse` replacement."""
+
+ def __init__(self, response_string):
+ """Initialize new `FakeHttplibSocket`."""
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """Fake `httplib.HTTPConnection`."""
+
+ def __init__(self, app, host):
+ """Initialize `FakeHttplibConnection`."""
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, body="", headers=None):
+ """Requests made via this connection actually get translated and routed
+ into our WSGI app, we then wait for the response and turn it back into
+ an `httplib.HTTPResponse`.
+ """
+ if not headers:
+ headers = {}
+
+ req = webob.Request.blank(path)
+ req.method = method
+ req.headers = headers
+ req.host = self.host
+ req.body = body
+
+ resp = str(req.get_response(self.app))
+ resp = "HTTP/1.0 %s" % resp
+ sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(sock)
+ self.http_response.begin()
+
+ def getresponse(self):
+ """Return our generated response from the request."""
+ return self.http_response
+
+
+def wire_HTTPConnection_to_WSGI(host, app):
+ """Monkeypatches HTTPConnection so that if you try to connect to host, you
+ are instead routed straight to the given WSGI app.
+
+ After calling this method, when any code calls
+
+ httplib.HTTPConnection(host)
+
+ the connection object will be a fake. Its requests will be sent directly
+ to the given WSGI app rather than through a socket.
+
+    Code connecting to any host other than the given one is not affected.
+
+ This method may be called multiple times to map different hosts to
+ different apps.
+
+ This method returns the original HTTPConnection object, so that the caller
+ can restore the default HTTPConnection interface (for all hosts).
+ """
+ class HTTPConnectionDecorator(object):
+ """Wraps the real HTTPConnection class so that when you instantiate
+ the class you might instead get a fake instance.
+ """
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def __call__(self, connection_host, *args, **kwargs):
+ if connection_host == host:
+ return FakeHttplibConnection(app, host)
+ else:
+ return self.wrapped(connection_host, *args, **kwargs)
+
+ oldHTTPConnection = httplib.HTTPConnection
+ httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
+ return oldHTTPConnection
+
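+# Illustrative usage of the helper above (it mirrors WsgiLimiterProxyTest
+# below; the host string is arbitrary):
+#
+#     original = wire_HTTPConnection_to_WSGI("169.254.0.1:80", app)
+#     conn = httplib.HTTPConnection("169.254.0.1:80")  # now talks to `app`
+#     ...
+#     httplib.HTTPConnection = original   # restore the real class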
+
+class WsgiLimiterProxyTest(BaseLimitTestSuite):
+ """Tests for the `limits.WsgiLimiterProxy` class."""
+
+ def setUp(self):
+ """Do some nifty HTTP/WSGI magic which allows for WSGI to be called
+ directly by something like the `httplib` library.
+ """
+ super(WsgiLimiterProxyTest, self).setUp()
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+ self.oldHTTPConnection = (
+ wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
+ self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
+
+ def test_200(self):
+ # Successful request test.
+ delay = self.proxy.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_403(self):
+ # Forbidden request test.
+ delay = self.proxy.check_for_delay("GET", "/delayed")
+ self.assertEqual(delay, (None, None))
+
+ delay, error = self.proxy.check_for_delay("GET", "/delayed")
+ error = error.strip()
+
+ expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
+ "made to /delayed every minute.")
+
+ self.assertEqual((delay, error), expected)
+
+ def tearDown(self):
+ # restore original HTTPConnection object
+ httplib.HTTPConnection = self.oldHTTPConnection
+ super(WsgiLimiterProxyTest, self).tearDown()
+
+
+class LimitsViewBuilderTest(test.NoDBTestCase):
+ def setUp(self):
+ super(LimitsViewBuilderTest, self).setUp()
+ self.view_builder = views.limits.ViewBuilder()
+ self.rate_limits = [{"URI": "*",
+ "regex": ".*",
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "resetTime": 1311272226},
+ {"URI": "*/servers",
+ "regex": "^/servers",
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "resetTime": 1311272226}]
+ self.absolute_limits = {"metadata_items": 1,
+ "injected_files": 5,
+ "injected_file_content_bytes": 5}
+
+ def test_build_limits(self):
+ expected_limits = {"limits": {
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{"value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-07-21T18:17:06Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{"value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-07-21T18:17:06Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 5}}}
+
+ output = self.view_builder.build(self.rate_limits,
+ self.absolute_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
+
+ def test_build_limits_empty_limits(self):
+ expected_limits = {"limits": {"rate": [],
+ "absolute": {}}}
+
+ abs_limits = {}
+ rate_limits = []
+ output = self.view_builder.build(rate_limits, abs_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
+
+
+class LimitsXMLSerializationTest(test.NoDBTestCase):
+ def test_xml_declaration(self):
+ serializer = limits.LimitsTemplate()
+
+ fixture = {"limits": {
+ "rate": [],
+ "absolute": {}}}
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_index(self):
+ serializer = limits.LimitsTemplate()
+ fixture = {
+ "limits": {
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-12-15T22:42:45Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240}}}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'limits')
+
+ # verify absolute limits
+ absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+ self.assertEqual(len(absolutes), 4)
+ for limit in absolutes:
+ name = limit.get('name')
+ value = limit.get('value')
+ self.assertEqual(value, str(fixture['limits']['absolute'][name]))
+
+ # verify rate limits
+ rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+ self.assertEqual(len(rates), 2)
+ for i, rate in enumerate(rates):
+ for key in ['uri', 'regex']:
+ self.assertEqual(rate.get(key),
+ str(fixture['limits']['rate'][i][key]))
+ rate_limits = rate.xpath('ns:limit', namespaces=NS)
+ self.assertEqual(len(rate_limits), 1)
+ for j, limit in enumerate(rate_limits):
+ for key in ['verb', 'value', 'remaining', 'unit',
+ 'next-available']:
+ self.assertEqual(limit.get(key),
+ str(fixture['limits']['rate'][i]['limit'][j][key]))
+
+ def test_index_no_limits(self):
+ serializer = limits.LimitsTemplate()
+
+ fixture = {"limits": {
+ "rate": [],
+ "absolute": {}}}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'limits')
+
+ # verify absolute limits
+ absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+ self.assertEqual(len(absolutes), 0)
+
+ # verify rate limits
+ rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+ self.assertEqual(len(rates), 0)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
new file mode 100644
index 0000000000..16f8ce14bf
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -0,0 +1,1556 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import servers
+from nova.compute import api as compute_api
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+FAKE_UUID = fakes.FAKE_UUID
+INSTANCE_IDS = {FAKE_UUID: 1}
+
+
+def return_server_not_found(*arg, **kwarg):
+ raise exception.NotFound()
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ inst = dict(inst, **values)
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, kwargs, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ return inst
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance, password):
+ self.instance_id = instance['uuid']
+ self.password = password
+
+
+class ServerActionsControllerTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+ def setUp(self):
+ super(ServerActionsControllerTest, self).setUp()
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ host='fake_host'))
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ fakes.stub_out_nw_api(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.flags(allow_instance_snapshots=True,
+ enable_instance_password=True)
+ self.uuid = FAKE_UUID
+ self.url = '/v2/fake/servers/%s/action' % self.uuid
+ self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+
+ class FakeExtManager(object):
+ def is_loaded(self, ext):
+ return False
+
+ self.controller = servers.Controller(ext_mgr=FakeExtManager())
+ self.compute_api = self.controller.compute_api
+ self.context = context.RequestContext('fake', 'fake')
+ self.app = fakes.wsgi_app(init_only=('servers',),
+ fake_auth_context=self.context)
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank('/v2/fake' + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_db_instance(
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance)
+
+ self.compute_api.get(self.context, uuid,
+ want_objects=True).AndReturn(instance)
+ return instance
+
+ def _test_locked_instance(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+ getattr(compute_api.API, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_actions_with_locked_instance(self):
+ actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
+ 'rebuild']
+
+ method_translations = {'confirmResize': 'confirm_resize',
+ 'revertResize': 'revert_resize'}
+
+ body_map = {'resize': {'flavorRef': '2'},
+ 'reboot': {'type': 'HARD'},
+ 'rebuild': {'imageRef': self.image_uuid,
+ 'adminPass': 'TNc53Dr8s7vw'}}
+
+ args_map = {'resize': (('2',), {}),
+ 'confirmResize': ((), {}),
+ 'reboot': (('HARD',), {}),
+ 'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'),
+ {'files_to_inject': None})}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(compute_api.API, method or action)
+ self._test_locked_instance(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_server_change_password(self):
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+ body = {'changePassword': {'adminPass': '1234pass'}}
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_change_password(req, FAKE_UUID, body)
+
+ self.assertEqual(mock_method.instance_id, self.uuid)
+ self.assertEqual(mock_method.password, '1234pass')
+
+ def test_server_change_password_pass_disabled(self):
+ # run with enable_instance_password disabled to verify adminPass
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+ body = {'changePassword': {'adminPass': '1234pass'}}
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_change_password(req, FAKE_UUID, body)
+
+ self.assertEqual(mock_method.instance_id, self.uuid)
+ # Note: the mock still contains the password.
+ self.assertEqual(mock_method.password, '1234pass')
+
+ def test_server_change_password_not_a_string(self):
+ body = {'changePassword': {'adminPass': 1234}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_change_password,
+ req, FAKE_UUID, body)
+
+ def test_server_change_password_bad_request(self):
+ body = {'changePassword': {'pass': '12345'}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_change_password,
+ req, FAKE_UUID, body)
+
+ def test_server_change_password_empty_string(self):
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+ body = {'changePassword': {'adminPass': ''}}
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_change_password(req, FAKE_UUID, body)
+
+ self.assertEqual(mock_method.instance_id, self.uuid)
+ self.assertEqual(mock_method.password, '')
+
+ def test_server_change_password_none(self):
+ body = {'changePassword': {'adminPass': None}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_change_password,
+ req, FAKE_UUID, body)
+
+ def test_reboot_hard(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_soft(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_incorrect_type(self):
+ body = dict(reboot=dict(type="NOT_A_TYPE"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_missing_type(self):
+ body = dict(reboot=dict())
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_none(self):
+ body = dict(reboot=dict(type=None))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_not_found(self):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server_not_found)
+
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_reboot,
+ req, str(uuid.uuid4()), body)
+
+ def test_reboot_raises_conflict_on_invalid_state(self):
+ body = dict(reboot=dict(type="HARD"))
+
+ def fake_reboot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING_HARD))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_preserve_ephemeral_is_ignored_when_ext_not_loaded(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE,
+ host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "preserve_ephemeral": False,
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ context = req.environ['nova.context']
+
+ self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg(), files_to_inject=None)
+ self.mox.ReplayAll()
+
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+
+ def _test_rebuild_preserve_ephemeral(self, value=None):
+ def fake_is_loaded(ext):
+ return ext == 'os-preserve-ephemeral-rebuild'
+ self.stubs.Set(self.controller.ext_mgr, 'is_loaded', fake_is_loaded)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE,
+ host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+ if value is not None:
+ body['rebuild']['preserve_ephemeral'] = value
+
+ req = fakes.HTTPRequest.blank(self.url)
+ context = req.environ['nova.context']
+
+ self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+
+ if value is not None:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg(), preserve_ephemeral=value,
+ files_to_inject=None)
+ else:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg(), files_to_inject=None)
+ self.mox.ReplayAll()
+
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+
+ def test_rebuild_preserve_ephemeral_true(self):
+ self._test_rebuild_preserve_ephemeral(True)
+
+ def test_rebuild_preserve_ephemeral_false(self):
+ self._test_rebuild_preserve_ephemeral(False)
+
+ def test_rebuild_preserve_ephemeral_default(self):
+ self._test_rebuild_preserve_ephemeral()
+
+ def test_rebuild_accepted_minimum(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(len(body['server']['adminPass']),
+ CONF.password_length)
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_instance_with_image_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ # proper local hrefs must start with 'http://localhost/v2/'
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_uuid,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_instance_with_image_href_uses_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ # proper local hrefs must start with 'http://localhost/v2/'
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_accepted_minimum_pass_disabled(self):
+ # run with enable_instance_password disabled to verify adminPass
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn("adminPass", body['server'])
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_raises_conflict_on_invalid_state(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ def fake_rebuild(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_accepted_with_metadata(self):
+ metadata = {'new': 'metadata'}
+
+ return_server = fakes.fake_instance_get(metadata=metadata,
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": metadata,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['metadata'], metadata)
+
+ def test_rebuild_accepted_with_bad_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": "stack",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_with_too_large_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": {
+ 256 * "k": "value"
+ }
+ }
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller._action_rebuild, req,
+ FAKE_UUID, body)
+
+ def test_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ "imageId": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_bad_personality(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": "INVALID b64",
+ }]
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_personality(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": base64.b64encode("Test String"),
+ }]
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertNotIn('personality', body['server'])
+
+ def test_rebuild_admin_pass(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['adminPass'], 'asdf')
+
+ def test_rebuild_admin_pass_pass_disabled(self):
+ # run with enable_instance_password disabled to verify adminPass
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn('adminPass', body['server'])
+
+ def test_rebuild_server_not_found(self):
+ def server_not_found(self, instance_id,
+ columns_to_join=None, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_with_bad_image(self):
+ body = {
+ "rebuild": {
+ "imageRef": "foo",
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_accessIP(self):
+ attributes = {
+ 'access_ip_v4': '172.19.0.1',
+ 'access_ip_v6': 'fe80::1',
+ }
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "accessIPv4": "172.19.0.1",
+ "accessIPv6": "fe80::1",
+ },
+ }
+
+ data = {'changes': {}}
+ orig_get = compute_api.API.get
+
+ def wrap_get(*args, **kwargs):
+ data['instance'] = orig_get(*args, **kwargs)
+ return data['instance']
+
+ def fake_save(context, **kwargs):
+ data['changes'].update(data['instance'].obj_get_changes())
+
+ self.stubs.Set(compute_api.API, 'get', wrap_get)
+ self.stubs.Set(objects.Instance, 'save', fake_save)
+ req = fakes.HTTPRequest.blank(self.url)
+
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+
+ self.assertEqual(self._image_href, data['changes']['image_ref'])
+ self.assertEqual("", data['changes']['kernel_id'])
+ self.assertEqual("", data['changes']['ramdisk_id'])
+ self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
+ self.assertEqual(0, data['changes']['progress'])
+ for attr, value in attributes.items():
+ self.assertEqual(value, str(data['changes'][attr]))
+
+ def test_rebuild_when_kernel_not_exists(self):
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_proper_kernel_ram(self):
+ instance_meta = {'kernel_id': None, 'ramdisk_id': None}
+
+ orig_get = compute_api.API.get
+
+ def wrap_get(*args, **kwargs):
+ inst = orig_get(*args, **kwargs)
+ instance_meta['instance'] = inst
+ return inst
+
+ def fake_save(context, **kwargs):
+ instance = instance_meta['instance']
+ for key in instance_meta.keys():
+ if key in instance.obj_what_changed():
+ instance_meta[key] = instance[key]
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ self.stubs.Set(compute_api.API, 'get', wrap_get)
+ self.stubs.Set(objects.Instance, 'save', fake_save)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller._action_rebuild(req, FAKE_UUID, body).obj
+ self.assertEqual(instance_meta['kernel_id'], '1')
+ self.assertEqual(instance_meta['ramdisk_id'], '2')
+
+ @mock.patch.object(compute_api.API, 'rebuild')
+ def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_resize_server(self):
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(compute_api.API, 'resize', resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_server_no_flavor(self):
+ body = dict(resize=dict())
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_server_no_flavor_ref(self):
+ body = dict(resize=dict(flavorRef=None))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_image_exceptions(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ self.resize_called = 0
+ image_id = 'fake_image_id'
+
+ exceptions = [
+ (exception.ImageNotAuthorized(image_id=image_id),
+ webob.exc.HTTPUnauthorized),
+ (exception.ImageNotFound(image_id=image_id),
+ webob.exc.HTTPBadRequest),
+ (exception.Invalid, webob.exc.HTTPBadRequest),
+ (exception.NoValidHost(reason='Bad host'),
+ webob.exc.HTTPBadRequest),
+ (exception.AutoDiskConfigDisabledByImage(image=image_id),
+ webob.exc.HTTPBadRequest),
+ ]
+
+ raised, expected = map(iter, zip(*exceptions))
+
+ def _fake_resize(obj, context, instance, flavor_id):
+ self.resize_called += 1
+ raise raised.next()
+
+ self.stubs.Set(compute_api.API, 'resize', _fake_resize)
+
+ for call_no in range(len(exceptions)):
+ req = fakes.HTTPRequest.blank(self.url)
+ next_exception = expected.next()
+ actual = self.assertRaises(next_exception,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+ if (isinstance(exceptions[call_no][0],
+ exception.NoValidHost)):
+ self.assertEqual(actual.explanation,
+ 'No valid host was found. Bad host')
+ elif (isinstance(exceptions[call_no][0],
+ exception.AutoDiskConfigDisabledByImage)):
+ self.assertEqual(actual.explanation,
+ 'Requested image fake_image_id has automatic'
+ ' disk resize disabled.')
+ self.assertEqual(self.resize_called, call_no + 1)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.CannotResizeDisk(reason=''))
+ def test_resize_raises_cannot_resize_disk(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.FlavorNotFound(reason='',
+ flavor_id='fake_id'))
+ def test_resize_raises_flavor_not_found(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_raises_conflict_on_invalid_state(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.NoValidHost(reason=''))
+ def test_resize_raises_no_valid_host(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch.object(compute_api.API, 'resize')
+ def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
+ mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_server(self):
+ body = dict(confirmResize=None)
+
+ self.confirm_resize_called = False
+
+ def cr_mock(*args):
+ self.confirm_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.confirm_resize_called, True)
+
+ def test_confirm_resize_migration_not_found(self):
+ body = dict(confirmResize=None)
+
+ def confirm_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'confirm_resize',
+ confirm_resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_raises_conflict_on_invalid_state(self):
+ body = dict(confirmResize=None)
+
+ def fake_confirm_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'confirm_resize',
+ fake_confirm_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_migration_not_found(self):
+ body = dict(revertResize=None)
+
+ def revert_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'revert_resize',
+ revert_resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_server_not_found(self):
+ body = dict(revertResize=None)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_revert_resize,
+ req, "bad_server_id", body)
+
+ def test_revert_resize_server(self):
+ body = dict(revertResize=None)
+
+ self.revert_resize_called = False
+
+ def revert_mock(*args):
+ self.revert_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_revert_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.revert_resize_called, True)
+
+ def test_revert_resize_raises_conflict_on_invalid_state(self):
+ body = dict(revertResize=None)
+
+ def fake_revert_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'revert_resize',
+ fake_revert_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_create_image(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v2/fake/images/123', location)
+
+ def test_create_image_glance_link_prefix(self):
+ self.flags(osapi_glance_link_prefix='https://glancehost')
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('https://glancehost/v2/fake/images/123', location)
+
+ def test_create_image_name_too_long(self):
+ long_name = 'a' * 260
+ body = {
+ 'createImage': {
+ 'name': long_name,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image, req,
+ FAKE_UUID, body)
+
+ def _do_test_create_volume_backed_image(self, extra_properties):
+
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+
+ if extra_properties:
+ body['createImage']['metadata'] = extra_properties
+
+ image_service = glance.get_default_image_service()
+
+ bdm = [dict(volume_id=_fake_id('a'),
+ volume_size=1,
+ device_name='vda',
+ delete_on_termination=False)]
+ props = dict(kernel_id=_fake_id('b'),
+ ramdisk_id=_fake_id('c'),
+ root_device_name='/dev/vda',
+ block_device_mapping=bdm)
+ original_image = dict(properties=props,
+ container_format='ami',
+ status='active',
+ is_public=True)
+
+ image_service.create(None, original_image)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref=original_image['id'],
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake')
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ image_id = location.replace('http://localhost/v2/fake/images/', '')
+ image = image_service.show(None, image_id)
+
+ self.assertEqual(image['name'], 'snapshot_of_volume_backed')
+ properties = image['properties']
+ self.assertEqual(properties['kernel_id'], _fake_id('b'))
+ self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
+ self.assertEqual(properties['root_device_name'], '/dev/vda')
+ self.assertEqual(properties['bdm_v2'], True)
+ bdms = properties['block_device_mapping']
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['boot_index'], 0)
+ self.assertEqual(bdms[0]['source_type'], 'snapshot')
+ self.assertEqual(bdms[0]['destination_type'], 'volume')
+ self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
+ for fld in ('connection_info', 'id',
+ 'instance_uuid', 'device_name'):
+ self.assertNotIn(fld, bdms[0])
+ for k in extra_properties.keys():
+ self.assertEqual(properties[k], extra_properties[k])
+
+ def test_create_volume_backed_image_no_metadata(self):
+ self._do_test_create_volume_backed_image({})
+
+ def test_create_volume_backed_image_with_metadata(self):
+ self._do_test_create_volume_backed_image(dict(ImageType='Gold',
+ ImageVersion='2.0'))
+
+ def _test_create_volume_backed_image_with_metadata_from_volume(
+ self, extra_metadata=None):
+
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+ if extra_metadata:
+ body['createImage']['metadata'] = extra_metadata
+
+ image_service = glance.get_default_image_service()
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref='',
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ fake_metadata = {'test_key1': 'test_value1',
+ 'test_key2': 'test_value2'}
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake',
+ volume_image_metadata=fake_metadata)
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ req = fakes.HTTPRequest.blank(self.url)
+
+ self.mox.ReplayAll()
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+ location = response.headers['Location']
+ image_id = location.replace('http://localhost/v2/fake/images/', '')
+ image = image_service.show(None, image_id)
+
+ properties = image['properties']
+ self.assertEqual(properties['test_key1'], 'test_value1')
+ self.assertEqual(properties['test_key2'], 'test_value2')
+ if extra_metadata:
+ for key, val in extra_metadata.items():
+ self.assertEqual(properties[key], val)
+
+ def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume()
+
+ def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume(
+ extra_metadata={'a': 'b'})
+
+ def test_create_image_snapshots_disabled(self):
+ """Don't permit a snapshot if the allow_instance_snapshots flag is
+ False
+ """
+ self.flags(allow_instance_snapshots=False)
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v2/fake/images/123', location)
+
+ def test_create_image_with_too_much_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {},
+ },
+ }
+ for num in range(CONF.quota_metadata_items + 1):
+ body['createImage']['metadata']['foo%i' % num] = "bar"
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_blank_name(self):
+ body = {
+ 'createImage': {
+ 'name': '',
+ }
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_raises_conflict_on_invalid_state(self):
+ def snapshot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ self.stubs.Set(compute_api.API, 'snapshot', snapshot)
+
+ body = {
+ "createImage": {
+ "name": "test_snapshot",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+
+class TestServerActionXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerActionXMLDeserializer, self).setUp()
+ self.deserializer = servers.ActionDeserializer()
+
+ def test_create_image(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_image_with_metadata(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test">
+ <metadata>
+ <meta key="key1">value1</meta>
+ </metadata>
+</createImage>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ "metadata": {"key1": "value1"},
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_change_pass(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <changePassword
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ adminPass="1234pass"/> """
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "changePassword": {
+ "adminPass": "1234pass",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_change_pass_no_pass(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <changePassword
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
+ self.assertRaises(AttributeError,
+ self.deserializer.deserialize,
+ serial_request,
+ 'action')
+
+ def test_change_pass_empty_pass(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <changePassword
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ adminPass=""/> """
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "changePassword": {
+ "adminPass": "",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_reboot(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <reboot
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ type="HARD"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "reboot": {
+ "type": "HARD",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_reboot_no_type(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <reboot
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ self.assertRaises(AttributeError,
+ self.deserializer.deserialize,
+ serial_request,
+ 'action')
+
+ def test_resize(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <resize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ flavorRef="http://localhost/flavors/3"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "resize": {"flavorRef": "http://localhost/flavors/3"},
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_resize_no_flavor_ref(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <resize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ self.assertRaises(AttributeError,
+ self.deserializer.deserialize,
+ serial_request,
+ 'action')
+
+ def test_confirm_resize(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <confirmResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "confirmResize": None,
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_revert_resize(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <revertResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "revertResize": None,
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_rebuild(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="http://localhost/images/1">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">Mg==</file>
+ </personality>
+ </rebuild>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "rebuild": {
+ "name": "new-server-test",
+ "imageRef": "http://localhost/images/1",
+ "metadata": {
+ "My Server Name": "Apache1",
+ },
+ "personality": [
+ {"path": "/etc/banner.txt", "contents": "Mg=="},
+ ],
+ },
+ }
+ self.assertThat(request['body'], matchers.DictMatches(expected))
+
+ def test_rebuild_minimum(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="http://localhost/images/1"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/1",
+ },
+ }
+ self.assertThat(request['body'], matchers.DictMatches(expected))
+
+ def test_rebuild_no_imageRef(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">Mg==</file>
+ </personality>
+ </rebuild>"""
+ self.assertRaises(AttributeError,
+ self.deserializer.deserialize,
+ serial_request,
+ 'action')
+
+ def test_rebuild_blank_name(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="http://localhost/images/1"
+ name=""/>"""
+ self.assertRaises(AttributeError,
+ self.deserializer.deserialize,
+ serial_request,
+ 'action')
+
+ def test_rebuild_preserve_ephemeral_passed(self):
+ serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+ <rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="http://localhost/images/1"
+ preserve_ephemeral="true"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/1",
+ "preserve_ephemeral": True,
+ },
+ }
+ self.assertThat(request['body'], matchers.DictMatches(expected))
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
new file mode 100644
index 0000000000..ba9126f0f1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -0,0 +1,771 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import server_metadata \
+ as server_metadata_v21
+from nova.api.openstack.compute import server_metadata as server_metadata_v2
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import vm_states
+import nova.db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+
+
+def return_create_instance_metadata_max(context, server_id, metadata, delete):
+ return stub_max_server_metadata()
+
+
+def return_create_instance_metadata(context, server_id, metadata, delete):
+ return stub_server_metadata()
+
+
+def fake_instance_save(inst, **kwargs):
+ inst.metadata = stub_server_metadata()
+ inst.obj_reset_changes()
+
+
+def return_server_metadata(context, server_id):
+ if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
+ msg = 'id %s must be a uuid in return server metadata' % server_id
+ raise Exception(msg)
+ return stub_server_metadata()
+
+
+def return_empty_server_metadata(context, server_id):
+ return {}
+
+
+def delete_server_metadata(context, server_id, key):
+ pass
+
+
+def stub_server_metadata():
+ metadata = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ }
+ return metadata
+
+
+def stub_max_server_metadata():
+ metadata = {"metadata": {}}
+ for num in range(CONF.quota_metadata_items):
+ metadata['metadata']['key%i' % num] = "blah"
+ return metadata
+
+
+def return_server(context, server_id, columns_to_join=None):
+ return fake_instance.fake_db_instance(
+ **{'id': server_id,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.ACTIVE})
+
+
+def return_server_by_uuid(context, server_uuid,
+ columns_to_join=None, use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'id': 1,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'metadata': stub_server_metadata(),
+ 'vm_state': vm_states.ACTIVE})
+
+
+def return_server_nonexistent(context, server_id,
+ columns_to_join=None, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=server_id)
+
+
+def fake_change_instance_metadata(self, context, instance, diff):
+ pass
+
+
+class ServerMetaDataTestV21(test.TestCase):
+ validation_ex = exception.ValidationError
+ validation_ex_large = validation_ex
+
+ def setUp(self):
+ super(ServerMetaDataTestV21, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_server_metadata)
+
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+ self._set_up_resources()
+
+ def _set_up_resources(self):
+ self.controller = server_metadata_v21.ServerMetadataController()
+ self.uuid = str(uuid.uuid4())
+ self.url = '/fake/servers/%s/metadata' % self.uuid
+
+ def _get_request(self, param_url=''):
+ return fakes.HTTPRequestV3.blank(self.url + param_url)
+
+ def test_index(self):
+ req = self._get_request()
+ res_dict = self.controller.index(req, self.uuid)
+
+ expected = {
+ 'metadata': {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ 'key3': 'value3',
+ },
+ }
+ self.assertEqual(expected, res_dict)
+
+ def test_index_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_server_nonexistent)
+ req = self._get_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.index, req, self.url)
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = self._get_request()
+ res_dict = self.controller.index(req, self.uuid)
+ expected = {'metadata': {}}
+ self.assertEqual(expected, res_dict)
+
+ def test_show(self):
+ req = self._get_request('/key2')
+ res_dict = self.controller.show(req, self.uuid, 'key2')
+ expected = {"meta": {'key2': 'value2'}}
+ self.assertEqual(expected, res_dict)
+
+ def test_show_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_server_nonexistent)
+ req = self._get_request('/key2')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, self.uuid, 'key2')
+
+ def test_show_meta_not_found(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = self._get_request('/key6')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, self.uuid, 'key6')
+
+ def test_delete(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_server_metadata)
+ self.stubs.Set(nova.db, 'instance_metadata_delete',
+ delete_server_metadata)
+ req = self._get_request('/key2')
+ req.method = 'DELETE'
+ res = self.controller.delete(req, self.uuid, 'key2')
+
+ self.assertIsNone(res)
+
+ def test_delete_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ req = self._get_request('/key1')
+ req.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, self.uuid, 'key1')
+
+ def test_delete_meta_not_found(self):
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = self._get_request('/key6')
+ req.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, self.uuid, 'key6')
+
+ def test_create(self):
+ self.stubs.Set(objects.Instance, 'save', fake_instance_save)
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = "application/json"
+ body = {"metadata": {"key9": "value9"}}
+ req.body = jsonutils.dumps(body)
+ res_dict = self.controller.create(req, self.uuid, body=body)
+
+ body['metadata'].update({
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ })
+ self.assertEqual(body, res_dict)
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=None)
+
+ def test_create_item_empty_key(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"metadata": {"": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=body)
+
+ def test_create_item_non_dict(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"metadata": None}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=body)
+
+ def test_create_item_key_too_long(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"metadata": {("a" * 260): "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex_large,
+ self.controller.create,
+ req, self.uuid, body=body)
+
+ def test_create_malformed_container(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url + '/key1')
+ req.method = 'PUT'
+ body = {"meta": {}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=body)
+
+ def test_create_malformed_data(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url + '/key1')
+ req.method = 'PUT'
+ body = {"metadata": ['asdf']}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=body)
+
+ def test_create_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ req = self._get_request()
+ req.method = 'POST'
+ body = {"metadata": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req, self.uuid, body=body)
+
+ def test_update_metadata(self):
+ self.stubs.Set(objects.Instance, 'save', fake_instance_save)
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ expected = {
+ 'metadata': {
+ 'key1': 'updatedvalue',
+ 'key29': 'newkey',
+ }
+ }
+ req.body = jsonutils.dumps(expected)
+ response = self.controller.update_all(req, self.uuid, body=expected)
+ self.assertEqual(expected, response)
+
+ def test_update_all(self):
+ self.stubs.Set(objects.Instance, 'save', fake_instance_save)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ expected = {
+ 'metadata': {
+ 'key10': 'value10',
+ 'key99': 'value99',
+ },
+ }
+ req.body = jsonutils.dumps(expected)
+ res_dict = self.controller.update_all(req, self.uuid, body=expected)
+
+ self.assertEqual(expected, res_dict)
+
+ def test_update_all_empty_container(self):
+ self.stubs.Set(objects.Instance, 'save', fake_instance_save)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ expected = {'metadata': {}}
+ req.body = jsonutils.dumps(expected)
+ res_dict = self.controller.update_all(req, self.uuid, body=expected)
+
+ self.assertEqual(expected, res_dict)
+
+ def test_update_all_empty_body_item(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url + '/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update_all, req, self.uuid,
+ body=None)
+
+ def test_update_all_with_non_dict_item(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url + '/bad')
+ req.method = 'PUT'
+ body = {"metadata": None}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update_all, req, self.uuid,
+ body=body)
+
+ def test_update_all_malformed_container(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ expected = {'meta': {}}
+ req.body = jsonutils.dumps(expected)
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update_all, req, self.uuid,
+ body=expected)
+
+ def test_update_all_malformed_data(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ expected = {'metadata': ['asdf']}
+ req.body = jsonutils.dumps(expected)
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update_all, req, self.uuid,
+ body=expected)
+
+ def test_update_all_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ body = {'metadata': {'key10': 'value10'}}
+ req.body = jsonutils.dumps(body)
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update_all, req, '100', body=body)
+
+ def test_update_all_non_dict(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'PUT'
+ body = {"metadata": None}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex, self.controller.update_all,
+ req, self.uuid, body=body)
+
+ def test_update_item(self):
+ self.stubs.Set(objects.Instance, 'save', fake_instance_save)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
+ expected = {"meta": {'key1': 'value1'}}
+ self.assertEqual(expected, res_dict)
+
+ def test_update_item_nonexistent_server(self):
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update, req, self.uuid, 'key1',
+ body=body)
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'key1',
+ body=None)
+
+ def test_update_malformed_container(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url)
+ req.method = 'PUT'
+ expected = {'meta': {}}
+ req.body = jsonutils.dumps(expected)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'key1',
+ body=expected)
+
+ def test_update_malformed_data(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url)
+ req.method = 'PUT'
+ expected = {'metadata': ['asdf']}
+ req.body = jsonutils.dumps(expected)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'key1',
+ body=expected)
+
+ def test_update_item_empty_key(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {"": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, '',
+ body=body)
+
+ def test_update_item_key_too_long(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {("a" * 260): "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex_large,
+ self.controller.update,
+ req, self.uuid, ("a" * 260), body=body)
+
+ def test_update_item_value_too_long(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": ("a" * 260)}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex_large,
+ self.controller.update,
+ req, self.uuid, "key1", body=body)
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/key1')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1", "key2": "value2"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'key1',
+ body=body)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/bad')
+ req.method = 'PUT'
+ body = {"meta": {"key1": "value1"}}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, self.uuid, 'bad',
+ body=body)
+
+ def test_update_item_non_dict(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request('/bad')
+ req.method = 'PUT'
+ body = {"meta": None}
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'bad',
+ body=body)
+
+ def test_update_empty_container(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url)
+ req.method = 'PUT'
+ expected = {'metadata': {}}
+ req.body = jsonutils.dumps(expected)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(self.validation_ex,
+ self.controller.update, req, self.uuid, 'bad',
+ body=expected)
+
+ def test_too_many_metadata_items_on_create(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ data = {"metadata": {}}
+ for num in range(CONF.quota_metadata_items + 1):
+ data['metadata']['key%i' % num] = "blah"
+ req = self._get_request()
+ req.method = 'POST'
+ req.body = jsonutils.dumps(data)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, req, self.uuid, body=data)
+
+ def test_invalid_metadata_items_on_create(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+
+ # test for long key
+ data = {"metadata": {"a" * 260: "value1"}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex_large,
+ self.controller.create, req, self.uuid, body=data)
+
+ # test for long value
+ data = {"metadata": {"key": "v" * 260}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex_large,
+ self.controller.create, req, self.uuid, body=data)
+
+ # test for empty key.
+ data = {"metadata": {"": "value1"}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex,
+ self.controller.create, req, self.uuid, body=data)
+
+ def test_too_many_metadata_items_on_update_item(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ data = {"metadata": {}}
+ for num in range(CONF.quota_metadata_items + 1):
+ data['metadata']['key%i' % num] = "blah"
+ req = self._get_request()
+ req.method = 'PUT'
+ req.body = jsonutils.dumps(data)
+ req.headers["content-type"] = "application/json"
+
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
+ req, self.uuid, body=data)
+
+ def test_invalid_metadata_items_on_update_item(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+
+ # test for long key
+ data = {"metadata": {"a" * 260: "value1"}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex_large,
+ self.controller.update_all, req, self.uuid,
+ body=data)
+
+ # test for long value
+ data = {"metadata": {"key": "v" * 260}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex_large,
+ self.controller.update_all, req, self.uuid,
+ body=data)
+
+ # test for empty key.
+ data = {"metadata": {"": "value1"}}
+ req.body = jsonutils.dumps(data)
+ self.assertRaises(self.validation_ex,
+ self.controller.update_all, req, self.uuid,
+ body=data)
+
+
+class ServerMetaDataTestV2(ServerMetaDataTestV21):
+ validation_ex = webob.exc.HTTPBadRequest
+ validation_ex_large = webob.exc.HTTPRequestEntityTooLarge
+
+ def _set_up_resources(self):
+ self.controller = server_metadata_v2.Controller()
+ self.uuid = str(uuid.uuid4())
+ self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
+
+ def _get_request(self, param_url=''):
+ return fakes.HTTPRequest.blank(self.url + param_url)
+
+
+class BadStateServerMetaDataTestV21(test.TestCase):
+
+ def setUp(self):
+ super(BadStateServerMetaDataTestV21, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.stubs.Set(nova.db, 'instance_metadata_get',
+ return_server_metadata)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+ self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ self._return_server_in_build_by_uuid)
+ self.stubs.Set(nova.db, 'instance_metadata_delete',
+ delete_server_metadata)
+ self._set_up_resources()
+
+ def _set_up_resources(self):
+ self.controller = server_metadata_v21.ServerMetadataController()
+ self.uuid = str(uuid.uuid4())
+ self.url = '/fake/servers/%s/metadata' % self.uuid
+
+ def _get_request(self, param_url=''):
+ return fakes.HTTPRequestV3.blank(self.url + param_url)
+
+ def test_invalid_state_on_delete(self):
+ req = self._get_request('/key2')
+ req.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+ req, self.uuid, 'key2')
+
+ def test_invalid_state_on_update_metadata(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ expected = {
+ 'metadata': {
+ 'key1': 'updatedvalue',
+ 'key29': 'newkey',
+ }
+ }
+ req.body = jsonutils.dumps(expected)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
+ req, self.uuid, body=expected)
+
+ def _return_server_in_build(self, context, server_id,
+ columns_to_join=None):
+ return fake_instance.fake_db_instance(
+ **{'id': server_id,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'vm_state': vm_states.BUILDING})
+
+ def _return_server_in_build_by_uuid(self, context, server_uuid,
+ columns_to_join=None, use_slave=False):
+ return fake_instance.fake_db_instance(
+ **{'id': 1,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'vm_state': vm_states.BUILDING})
+
+ @mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
+ side_effect=exception.InstanceIsLocked(instance_uuid=0))
+ def test_instance_lock_update_metadata(self, mock_update):
+ req = self._get_request()
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ expected = {
+ 'metadata': {
+ 'keydummy': 'newkey',
+ }
+ }
+ req.body = jsonutils.dumps(expected)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
+ req, self.uuid, body=expected)
+
+
+class BadStateServerMetaDataTestV2(BadStateServerMetaDataTestV21):
+ def _set_up_resources(self):
+ self.controller = server_metadata_v2.Controller()
+ self.uuid = str(uuid.uuid4())
+ self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
+
+ def _get_request(self, param_url=''):
+ return fakes.HTTPRequest.blank(self.url + param_url)
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
new file mode 100644
index 0000000000..c37df741ec
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -0,0 +1,4625 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import contextlib
+import datetime
+import urllib
+import uuid
+
+import iso8601
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+import six.moves.urllib.parse as urlparse
+import testtools
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import ips
+from nova.api.openstack.compute import servers
+from nova.api.openstack.compute import views
+from nova.api.openstack import extensions
+from nova.api.openstack import xmlutil
+from nova.compute import api as compute_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import manager
+from nova.network.neutronv2 import api as neutron_api
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_keypair
+from nova.tests.unit import utils
+from nova import utils as nova_utils
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+FAKE_UUID = fakes.FAKE_UUID
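+# XML namespace constants: the compute v1.1 API namespace and the Atom feed namespace.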
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+XPATH_NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/compute/api/v1.1'
+}
+
+INSTANCE_IDS = {FAKE_UUID: 1}
+
+FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_servers_empty(context, *args, **kwargs):
+ return []
+
+
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
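+ # Mimic the DB API's (old_ref, new_ref) return value; the fake uses the updated dict for both.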
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, values, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
+ return inst
+
+
+def fake_compute_api(cls, req, id):
+ return True
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+
+class Base64ValidationTest(test.TestCase):
+ def setUp(self):
+ super(Base64ValidationTest, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ def test_decode_base64(self):
+ value = "A random string"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_binary(self):
+ value = "\x00\x12\x75\x99"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_whitespace(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertEqual(result, value)
+
+ def test_decode_base64_invalid(self):
+ invalid = "A random string"
+ result = self.controller._decode_base64(invalid)
+ self.assertIsNone(result)
+
+ def test_decode_base64_illegal_bytes(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertIsNone(result)
+
+
+class NeutronV2Subclass(neutron_api.API):
+ """Used to ensure that API handles subclasses properly."""
+ pass
+
+
+class ControllerTest(test.TestCase):
+
+ def setUp(self):
+ super(ControllerTest, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+ self.ips_controller = ips.Controller()
+ policy.reset()
+ policy.init()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+
+class ServersControllerTest(ControllerTest):
+ def test_can_check_loaded_extensions(self):
+ self.ext_mgr.extensions = {'os-fake': None}
+ self.assertTrue(self.controller.ext_mgr.is_loaded('os-fake'))
+ self.assertFalse(self.controller.ext_mgr.is_loaded('os-not-loaded'))
+
+ def test_requested_networks_prefix(self):
+ uuid = 'br-00000000-0000-0000-0000-000000000000'
+ requested_networks = [{'uuid': uuid}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertIn((uuid, None), res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(network, None, None, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_disabled_with_port(self):
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller._get_requested_networks,
+ requested_networks)
+
+ def test_requested_networks_api_enabled_with_v2_subclass(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_subclass_with_port(self):
+ cls = ('nova.tests.unit.api.openstack.compute' +
+ '.test_servers.NeutronV2Subclass')
+ self.flags(network_api_class=cls)
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_get_server_by_uuid(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+
+ def test_unique_host_id(self):
+ """Create two servers with the same host and different
+ project_ids and check that the hostId's are unique.
+ """
+ def return_instance_with_host(self, *args, **kwargs):
+ project_id = str(uuid.uuid4())
+ return fakes.stub_instance(id=1, uuid=FAKE_UUID,
+ project_id=project_id,
+ host='fake_host')
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_instance_with_host)
+ self.stubs.Set(db, 'instance_get',
+ return_instance_with_host)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ server1 = self.controller.show(req, FAKE_UUID)
+ server2 = self.controller.show(req, FAKE_UUID)
+
+ self.assertNotEqual(server1['server']['hostId'],
+ server2['server']['hostId'])
+
+ def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
+ status="ACTIVE", progress=100):
+ return {
+ "server": {
+ "id": uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": progress,
+ "name": "server1",
+ "status": status,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "hostId": '',
+ "image": {
+ "id": "10",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100'},
+ {'version': 6, 'addr': '2001:db8:0:1::1'}
+ ]
+ },
+ "metadata": {
+ "seq": "1",
+ },
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/servers/%s" % uuid,
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/servers/%s" % uuid,
+ },
+ ],
+ }
+ }
+
+ def test_get_server_by_id(self):
+ self.flags(use_ipv6=True)
+ image_bookmark = "http://localhost/fake/images/10"
+ flavor_bookmark = "http://localhost/fake/flavors/1"
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark,
+ status="BUILD",
+ progress=0)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_active_status_by_id(self):
+ image_bookmark = "http://localhost/fake/images/10"
+ flavor_bookmark = "http://localhost/fake/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_id_image_ref_by_id(self):
+ image_ref = "10"
+ image_bookmark = "http://localhost/fake/images/10"
+ flavor_id = "1"
+ flavor_bookmark = "http://localhost/fake/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, image_ref=image_ref,
+ flavor_id=flavor_id, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_addresses_from_cache(self):
+ pub0 = ('172.19.0.1', '172.19.0.2',)
+ pub1 = ('1.2.3.4',)
+ pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
+ priv0 = ('192.168.0.3', '192.168.0.4',)
+
+ def _ip(ip):
+ return {'address': ip, 'type': 'fixed'}
+
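+ # Network info cache: a 'public' network with two IPv4 subnets and an IPv6 subnet, and a 'private' network with one IPv4 subnet.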
+ nw_cache = [
+ {'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'public',
+ 'subnets': [{'cidr': '172.19.0.0/24',
+ 'ips': [_ip(ip) for ip in pub0]},
+ {'cidr': '1.2.3.0/16',
+ 'ips': [_ip(ip) for ip in pub1]},
+ {'cidr': 'b33f::/64',
+ 'ips': [_ip(ip) for ip in pub2]}]}},
+ {'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {'bridge': 'br1',
+ 'id': 2,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip(ip) for ip in priv0]}]}}]
+
+ return_server = fakes.fake_instance_get(nw_cache=nw_cache)
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % FAKE_UUID)
+ res_dict = self.ips_controller.index(req, FAKE_UUID)
+
+ expected = {
+ 'addresses': {
+ 'private': [
+ {'version': 4, 'addr': '192.168.0.3'},
+ {'version': 4, 'addr': '192.168.0.4'},
+ ],
+ 'public': [
+ {'version': 4, 'addr': '172.19.0.1'},
+ {'version': 4, 'addr': '172.19.0.2'},
+ {'version': 4, 'addr': '1.2.3.4'},
+ {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+ ],
+ },
+ }
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_get_server_addresses_nonexistent_network(self):
+ url = '/fake/servers/%s/ips/network_0' % FAKE_UUID
+ req = fakes.HTTPRequest.blank(url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
+ req, FAKE_UUID, 'network_0')
+
+ def test_get_server_addresses_nonexistent_server(self):
+ def fake_instance_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+
+ server_id = str(uuid.uuid4())
+ req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % server_id)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.ips_controller.index, req, server_id)
+
+ def test_get_server_list_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ res_dict = self.controller.index(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_list_with_reservation_id(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?reservation_id=foo')
+ res_dict = self.controller.index(req)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+
+ def test_get_server_list_with_reservation_id_empty(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+
+ def test_get_server_list_with_reservation_id_details(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+
+ def test_get_server_list(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['servers']), 5)
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+ self.assertIsNone(s.get('image', None))
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/servers/%s" % s['id'],
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/servers/%s" % s['id'],
+ },
+ ]
+
+ self.assertEqual(s['links'], expected_links)
+
+ def test_get_servers_with_limit(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?limit=3')
+ res_dict = self.controller.index(req)
+
+ servers = res_dict['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res_dict['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v2/fake/servers', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected_params = {'limit': ['3'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected_params))
+
+ def test_get_servers_with_limit_bad_value(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_server_details_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_details_with_limit(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=3')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v2/fake/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_server_details_with_limit_bad_value(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.detail, req)
+
+ def test_get_server_details_with_limit_and_other_params(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail'
+ '?limit=3&blah=2:t')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v2/fake/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'blah': ['2:t'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_servers_with_too_big_limit(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?limit=30')
+ res_dict = self.controller.index(req)
+ self.assertNotIn('servers_links', res_dict)
+
+ def test_get_servers_with_bad_limit(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?limit=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_marker(self):
+ url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2)
+ req = fakes.HTTPRequest.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
+
+ def test_get_servers_with_limit_and_marker(self):
+ url = '/v2/fake/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
+ req = fakes.HTTPRequest.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
+
+ def test_get_servers_with_bad_marker(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?limit=2&marker=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_bad_option(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?unknownoption=whee')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_image(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('image', search_opts)
+ self.assertEqual(search_opts['image'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?image=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_tenant_id_filter_converts_to_project_id_for_admin(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'newfake')
+ self.assertFalse(filters.get('tenant_id'))
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers'
+ '?all_tenants=1&tenant_id=newfake',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_normal(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_one(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_zero(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=0',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_false(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=false',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_invalid(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=xxx',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_admin_restricted_tenant(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'fake')
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_pass_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False):
+ self.assertIsNotNone(filters)
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_fail_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None):
+ self.assertIsNotNone(filters)
+ return [fakes.stub_instance(100)]
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:non_fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
+ def test_get_servers_allows_flavor(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('flavor', search_opts)
+ # flavor is an integer ID
+ self.assertEqual(search_opts['flavor'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?flavor=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_with_bad_flavor(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?flavor=abcde')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 0)
+
+ def test_get_server_details_with_bad_flavor(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail?flavor=abcde')
+ servers = self.controller.detail(req)['servers']
+
+ self.assertThat(servers, testtools.matchers.HasLength(0))
+
+ def test_get_servers_allows_status(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?status=active')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_allows_multi_status(self, get_all_mock):
+ server_uuid0 = str(uuid.uuid4())
+ server_uuid1 = str(uuid.uuid4())
+ db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+ fakes.stub_instance(101, uuid=server_uuid1)]
+ get_all_mock.return_value = instance_obj._make_instance_list(
+ context, instance_obj.InstanceList(), db_list, FIELDS)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&status=error')
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(2, len(servers))
+ self.assertEqual(server_uuid0, servers[0]['id'])
+ self.assertEqual(server_uuid1, servers[1]['id'])
+ expected_search_opts = dict(deleted=False,
+ vm_state=[vm_states.ACTIVE,
+ vm_states.ERROR],
+ project_id='fake')
+ get_all_mock.assert_called_once_with(mock.ANY,
+ search_opts=expected_search_opts, limit=mock.ANY,
+ marker=mock.ANY, want_objects=mock.ANY)
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_system_metadata_filter(self, get_all_mock):
+ server_uuid0 = str(uuid.uuid4())
+ server_uuid1 = str(uuid.uuid4())
+ expected_system_metadata = u'{"some_value": "some_key"}'
+ db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+ fakes.stub_instance(101, uuid=server_uuid1)]
+ get_all_mock.return_value = instance_obj._make_instance_list(
+ context, instance_obj.InstanceList(), db_list, FIELDS)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&status=error&system_metadata=' +
+ urllib.quote(expected_system_metadata),
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(2, len(servers))
+ self.assertEqual(server_uuid0, servers[0]['id'])
+ self.assertEqual(server_uuid1, servers[1]['id'])
+ expected_search_opts = dict(
+ deleted=False, vm_state=[vm_states.ACTIVE, vm_states.ERROR],
+ system_metadata=expected_system_metadata, project_id='fake')
+ get_all_mock.assert_called_once_with(mock.ANY,
+ search_opts=expected_search_opts, limit=mock.ANY,
+ marker=mock.ANY, want_objects=mock.ANY)
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_flavor_not_found(self, get_all_mock):
+ get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&flavor=abc')
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(0, len(servers))
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_allows_invalid_status(self, get_all_mock):
+ server_uuid0 = str(uuid.uuid4())
+ server_uuid1 = str(uuid.uuid4())
+ db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+ fakes.stub_instance(101, uuid=server_uuid1)]
+ get_all_mock.return_value = instance_obj._make_instance_list(
+ context, instance_obj.InstanceList(), db_list, FIELDS)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&status=invalid')
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(2, len(servers))
+ self.assertEqual(server_uuid0, servers[0]['id'])
+ self.assertEqual(server_uuid1, servers[1]['id'])
+ expected_search_opts = dict(deleted=False,
+ vm_state=[vm_states.ACTIVE],
+ project_id='fake')
+ get_all_mock.assert_called_once_with(mock.ANY,
+ search_opts=expected_search_opts, limit=mock.ANY,
+ marker=mock.ANY, want_objects=mock.ANY)
+
+ def test_get_servers_allows_task_status(self):
+ server_uuid = str(uuid.uuid4())
+ task_state = task_states.REBOOTING
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('task_state', search_opts)
+ self.assertEqual([task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING],
+ search_opts['task_state'])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid,
+ task_state=task_state)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/servers?status=reboot')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_resize_status(self):
+ # Test that the resize status maps to a list of vm states.
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'],
+ [vm_states.ACTIVE, vm_states.STOPPED])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?status=resize')
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_invalid_status(self):
+ # Test getting servers by invalid status.
+ req = fakes.HTTPRequest.blank('/fake/servers?status=baloney',
+ use_admin_context=False)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(len(servers), 0)
+
+ def test_get_servers_deleted_status_as_user(self):
+ req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
+ use_admin_context=False)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.detail, req)
+
+ def test_get_servers_deleted_status_as_admin(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], ['deleted'])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
+ use_admin_context=True)
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_name(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('name', search_opts)
+ self.assertEqual(search_opts['name'], 'whee.*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?name=whee.*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_changes_since(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('changes-since', search_opts)
+ changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
+ tzinfo=iso8601.iso8601.UTC)
+ self.assertEqual(search_opts['changes-since'], changes_since)
+ self.assertNotIn('deleted', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ params = 'changes-since=2011-01-24T17:08:01Z'
+ req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_changes_since_bad_value(self):
+ params = 'changes-since=asdf'
+ req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_admin_filters_as_user(self):
+ """Test getting servers by admin-only or unknown options when
+ context is not admin. Make sure the admin and unknown options
+ are stripped before they get to compute_api.get_all().
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ self.assertIn('ip', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # Allowed only by admins with admin API on
+ self.assertNotIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
+ res = self.controller.index(req)
+
+ servers = res['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_admin_options_as_admin(self):
+ """Test getting servers by admin-only or unknown options when
+ context is admin. All options should be passed
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # Allowed only by admins with admin API on
+ self.assertIn('ip', search_opts)
+ self.assertIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str,
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_ip(self):
+ """Test getting servers by ip."""
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('ip', search_opts)
+ self.assertEqual(search_opts['ip'], '10\..*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?ip=10\..*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_admin_allows_ip6(self):
+ """Test getting servers by ip6 with admin_api enabled and
+ admin context
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('ip6', search_opts)
+ self.assertEqual(search_opts['ip6'], 'ffff.*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequest.blank('/fake/servers?ip6=ffff.*',
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_all_server_details(self):
+ expected_flavor = {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": 'http://localhost/fake/flavors/1',
+ },
+ ],
+ }
+ expected_image = {
+ "id": "10",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": 'http://localhost/fake/images/10',
+ },
+ ],
+ }
+ req = fakes.HTTPRequest.blank('/fake/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['hostId'], '')
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+ self.assertEqual(s['image'], expected_image)
+ self.assertEqual(s['flavor'], expected_flavor)
+ self.assertEqual(s['status'], 'BUILD')
+ self.assertEqual(s['metadata']['seq'], str(i + 1))
+
+ def test_get_all_server_details_with_host(self):
+ """We want to make sure that if two instances are on the same host,
+ then they return the same hostId. If two instances are on different
+        hosts, they should return different hostIds. In this test, there
+ are 5 instances - 2 on one host and 3 on another.
+ """
+
+ def return_servers_with_host(context, *args, **kwargs):
+ return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
+ uuid=fakes.get_fake_uuid(i))
+ for i in xrange(5)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_with_host)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(server_list):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['hostId'], host_ids[i % 2])
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+
+
+class ServersControllerUpdateTest(ControllerTest):
+
+    def _get_request(self, body=None, options=None, content_type='json'):
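+        # When 'options' is supplied, stub db.instance_get so the rendered
+        # server reflects those attribute values.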
+ if options:
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(**options))
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/%s' % content_type
+        if content_type == 'json':
+            req.body = jsonutils.dumps(body)
+        else:
+            req.body = body
+ return req
+
+ def test_update_server_all_attributes(self):
+ body = {'server': {
+ 'name': 'server_test',
+ 'accessIPv4': '0.0.0.0',
+ 'accessIPv6': 'beef::0123',
+ }}
+ req = self._get_request(body, {'name': 'server_test',
+ 'access_ipv4': '0.0.0.0',
+ 'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+ self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
+ self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
+
+ def test_update_server_invalid_xml_raises_lookup(self):
+ body = """<?xml version="1.0" encoding="TF-8"?>
+ <metadata
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key="Label"></meta>"""
+ req = self._get_request(body, content_type='xml')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_invalid_xml_raises_expat(self):
+ body = """<?xml version="1.0" encoding="UTF-8"?>
+ <metadata
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key="Label"></meta>"""
+ req = self._get_request(body, content_type='xml')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_name(self):
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_name_too_long(self):
+ body = {'server': {'name': 'x' * 256}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_name_all_blank_spaces(self):
+ body = {'server': {'name': ' ' * 64}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv4(self):
+ body = {'server': {'accessIPv4': '0.0.0.0'}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
+
+ def test_update_server_access_ipv4_bad_format(self):
+ body = {'server': {'accessIPv4': 'bad_format'}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv4_none(self):
+ body = {'server': {'accessIPv4': None}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '')
+
+ def test_update_server_access_ipv4_blank(self):
+ body = {'server': {'accessIPv4': ''}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '')
+
+ def test_update_server_access_ipv6(self):
+ body = {'server': {'accessIPv6': 'beef::0123'}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
+
+ def test_update_server_access_ipv6_bad_format(self):
+ body = {'server': {'accessIPv6': 'bad_format'}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv6_none(self):
+ body = {'server': {'accessIPv6': None}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], '')
+
+ def test_update_server_access_ipv6_blank(self):
+ body = {'server': {'accessIPv6': ''}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], '')
+
+ def test_update_server_personality(self):
+ body = {
+ 'server': {
+ 'personality': []
+ }
+ }
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, FAKE_UUID, body)
+
+ def test_update_server_adminPass_ignored(self):
+ inst_dict = dict(name='server_test', adminPass='bacon')
+ body = dict(server=inst_dict)
+
+ def server_update(context, id, params):
+ filtered_dict = {
+ 'display_name': 'server_test',
+ }
+ self.assertEqual(params, filtered_dict)
+ filtered_dict['uuid'] = id
+ return filtered_dict
+
+ self.stubs.Set(db, 'instance_update', server_update)
+ # FIXME (comstud)
+ # self.stubs.Set(db, 'instance_get',
+ # return_server_with_attributes(name='server_test'))
+
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = jsonutils.dumps(body)
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_not_found(self):
+ def fake_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_not_found_on_update(self):
+ def fake_update(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_policy_fail(self):
+ rule = {'compute:update': common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.update, req, FAKE_UUID, body)
+
+
+class ServersControllerDeleteTest(ControllerTest):
+
+ def setUp(self):
+ super(ServersControllerDeleteTest, self).setUp()
+ self.server_delete_called = False
+
+ def instance_destroy_mock(*args, **kwargs):
+ self.server_delete_called = True
+ deleted_at = timeutils.utcnow()
+ return fake_instance.fake_db_instance(deleted_at=deleted_at)
+
+ self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+ def _create_delete_request(self, uuid):
+ fakes.stub_out_instance_quota(self.stubs, 0, 10)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
+ req.method = 'DELETE'
+ return req
+
+ def _delete_server_instance(self, uuid=FAKE_UUID):
+ req = self._create_delete_request(uuid)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.controller.delete(req, uuid)
+
+ def test_delete_server_instance(self):
+ self._delete_server_instance()
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_not_found(self):
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self._delete_server_instance,
+ uuid='non-existent-uuid')
+
+ def test_delete_locked_server(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
+ fakes.fake_actions_to_locked_server)
+ self.stubs.Set(compute_api.API, delete_types.DELETE,
+ fakes.fake_actions_to_locked_server)
+
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+ req, FAKE_UUID)
+
+ def test_delete_server_instance_while_building(self):
+ fakes.stub_out_instance_quota(self.stubs, 0, 10)
+ request = self._create_delete_request(FAKE_UUID)
+ self.controller.delete(request, FAKE_UUID)
+
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_while_deleting_host_up(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.DELETING,
+ host='fake_host'))
+ self.stubs.Set(objects.Instance, 'save',
+ lambda *args, **kwargs: None)
+
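+        # A fresh updated_at makes the compute service look "up", so the
+        # API leaves the in-progress delete to the compute host.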
+ @classmethod
+ def fake_get_by_compute_host(cls, context, host):
+ return {'updated_at': timeutils.utcnow()}
+ self.stubs.Set(objects.Service, 'get_by_compute_host',
+ fake_get_by_compute_host)
+
+ self.controller.delete(req, FAKE_UUID)
+        # The delete request can be ignored because it has already been
+        # accepted and forwarded to the compute service.
+ self.assertFalse(self.server_delete_called)
+
+ def test_delete_server_instance_while_deleting_host_down(self):
+ fake_network.stub_out_network_cleanup(self.stubs)
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.DELETING,
+ host='fake_host'))
+ self.stubs.Set(objects.Instance, 'save',
+ lambda *args, **kwargs: None)
+
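+        # datetime.min as updated_at makes the compute service look "down",
+        # so the API performs the delete locally.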
+ @classmethod
+ def fake_get_by_compute_host(cls, context, host):
+ return {'updated_at': datetime.datetime.min}
+ self.stubs.Set(objects.Service, 'get_by_compute_host',
+ fake_get_by_compute_host)
+
+ self.controller.delete(req, FAKE_UUID)
+        # The delete request was already accepted, but since the host is down
+        # the API should remove the instance anyway.
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_while_resize(self):
+ req = self._create_delete_request(FAKE_UUID)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_PREP))
+
+ self.controller.delete(req, FAKE_UUID)
+        # Delete should be allowed in any case, even during resizing,
+ # because it may get stuck.
+ self.assertTrue(self.server_delete_called)
+
+ def test_delete_server_instance_if_not_launched(self):
+ self.flags(reclaim_instance_interval=3600)
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'DELETE'
+
+ self.server_delete_called = False
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(launched_at=None))
+
+ def instance_destroy_mock(*args, **kwargs):
+ self.server_delete_called = True
+ deleted_at = timeutils.utcnow()
+ return fake_instance.fake_db_instance(deleted_at=deleted_at)
+ self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+ self.controller.delete(req, FAKE_UUID)
+ # delete() should be called for instance which has never been active,
+ # even if reclaim_instance_interval has been set.
+ self.assertEqual(self.server_delete_called, True)
+
+
+class ServersControllerRebuildInstanceTest(ControllerTest):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+ def setUp(self):
+ super(ServersControllerRebuildInstanceTest, self).setUp()
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.body = {
+ 'rebuild': {
+ 'name': 'new_name',
+ 'imageRef': self.image_href,
+ 'metadata': {
+ 'open': 'stack',
+ },
+ 'personality': [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "MQ==",
+ },
+ ],
+ },
+ }
+ self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def test_rebuild_instance_with_access_ipv4_bad_format(self):
+        # a malformed accessIPv4 address should result in a 400
+ self.body['rebuild']['accessIPv4'] = 'bad_format'
+ self.body['rebuild']['accessIPv6'] = 'fead::1234'
+ self.body['rebuild']['metadata']['hello'] = 'world'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_with_blank_metadata_key(self):
+ self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+ self.body['rebuild']['accessIPv6'] = 'fead::1234'
+ self.body['rebuild']['metadata'][''] = 'world'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_with_metadata_key_too_long(self):
+ self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+ self.body['rebuild']['accessIPv6'] = 'fead::1234'
+ self.body['rebuild']['metadata'][('a' * 260)] = 'world'
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_with_metadata_value_too_long(self):
+ self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+ self.body['rebuild']['accessIPv6'] = 'fead::1234'
+ self.body['rebuild']['metadata']['key1'] = ('a' * 260)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_fails_when_min_ram_too_small(self):
+ # make min_ram larger than our instance ram size
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="4096", min_disk="10")
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_fails_when_min_disk_too_small(self):
+ # make min_disk larger than our instance disk size
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="128", min_disk="100000")
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild, self.req,
+ FAKE_UUID, self.body)
+
+ def test_rebuild_instance_image_too_large(self):
+ # make image size larger than our instance disk size
+ size = str(1000 * (1024 ** 3))
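+        # 1000 * 1024^3 bytes (~1000 GiB), well beyond the stub flavor's disk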
+
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='active', size=size)
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_with_deleted_image(self):
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True,
+ status='DELETED')
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_onset_file_limit_over_quota(self):
+ def fake_get_image(self, context, image_href, **kwargs):
+ return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ name='public image', is_public=True, status='active')
+
+ with contextlib.nested(
+ mock.patch.object(fake._FakeImageService, 'show',
+ side_effect=fake_get_image),
+ mock.patch.object(self.controller.compute_api, 'rebuild',
+ side_effect=exception.OnsetFileLimitExceeded)
+ ) as (
+ show_mock, rebuild_mock
+ ):
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_instance_with_access_ipv6_bad_format(self):
+        # a malformed accessIPv6 address should result in a 400
+ self.body['rebuild']['accessIPv4'] = '1.2.3.4'
+ self.body['rebuild']['accessIPv6'] = 'bad_format'
+ self.body['rebuild']['metadata']['hello'] = 'world'
+ self.req.body = jsonutils.dumps(self.body)
+ self.req.headers["content-type"] = "application/json"
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, self.body)
+
+ def test_rebuild_instance_with_null_image_ref(self):
+ self.body['rebuild']['imageRef'] = None
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild, self.req, FAKE_UUID,
+ self.body)
+
+
+class ServerStatusTest(test.TestCase):
+
+ def setUp(self):
+ super(ServerStatusTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ def _fake_get_server(context, req, id):
+ return fakes.stub_instance(id)
+
+ self.stubs.Set(self.controller, '_get_server', _fake_get_server)
+
+ def _get_with_state(self, vm_state, task_state=None):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_state,
+ task_state=task_state))
+
+ request = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ return self.controller.show(request, FAKE_UUID)
+
+ def _req_with_policy_fail(self, policy_rule_name):
+ rule = {'compute:%s' % policy_rule_name:
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ return fakes.HTTPRequest.blank('/fake/servers/1234/action')
+
+ def test_active(self):
+ response = self._get_with_state(vm_states.ACTIVE)
+ self.assertEqual(response['server']['status'], 'ACTIVE')
+
+ def test_reboot(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING)
+ self.assertEqual(response['server']['status'], 'REBOOT')
+
+ def test_reboot_hard(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBOOTING_HARD)
+ self.assertEqual(response['server']['status'], 'HARD_REBOOT')
+
+ def test_reboot_resize_policy_fail(self):
+ req = self._req_with_policy_fail('reboot')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_reboot, req, '1234',
+ {'reboot': {'type': 'HARD'}})
+
+ def test_rebuild(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.REBUILDING)
+ self.assertEqual(response['server']['status'], 'REBUILD')
+
+ def test_rebuild_error(self):
+ response = self._get_with_state(vm_states.ERROR)
+ self.assertEqual(response['server']['status'], 'ERROR')
+
+ def test_resize(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.RESIZE_PREP)
+ self.assertEqual(response['server']['status'], 'RESIZE')
+
+ def test_confirm_resize_policy_fail(self):
+ req = self._req_with_policy_fail('confirm_resize')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_confirm_resize, req, '1234', {})
+
+ def test_verify_resize(self):
+ response = self._get_with_state(vm_states.RESIZED, None)
+ self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+ def test_revert_resize(self):
+ response = self._get_with_state(vm_states.RESIZED,
+ task_states.RESIZE_REVERTING)
+ self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
+
+ def test_revert_resize_policy_fail(self):
+ req = self._req_with_policy_fail('revert_resize')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_revert_resize, req, '1234', {})
+
+ def test_password_update(self):
+ response = self._get_with_state(vm_states.ACTIVE,
+ task_states.UPDATING_PASSWORD)
+ self.assertEqual(response['server']['status'], 'PASSWORD')
+
+ def test_stopped(self):
+ response = self._get_with_state(vm_states.STOPPED)
+ self.assertEqual(response['server']['status'], 'SHUTOFF')
+
+
+class ServersControllerCreateTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ fakes.stub_out_nw_api(self.stubs)
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ self.volume_id = 'fake'
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "config_drive": None,
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ "security_groups": inst['security_groups'],
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params, update_cells=False):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return inst
+
+ def server_update_and_get_original(
+ context, instance_uuid, params, update_cells=False,
+ columns_to_join=None):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update_and_get_original)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+ self.body = {
+ 'server': {
+ 'min_count': 2,
+ 'name': 'server_test',
+ 'imageRef': self.image_uuid,
+ 'flavorRef': self.flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "MQ==",
+ },
+ ],
+ },
+ }
+ self.bdm = [{'delete_on_termination': 1,
+ 'device_name': 123,
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+
+ self.req = fakes.HTTPRequest.blank('/fake/servers')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def _check_admin_pass_len(self, server_dict):
+ """utility function - check server_dict for adminPass length."""
+ self.assertEqual(CONF.password_length,
+ len(server_dict["adminPass"]))
+
+ def _check_admin_pass_missing(self, server_dict):
+ """utility function - check server_dict for absence of adminPass."""
+ self.assertNotIn("adminPass", server_dict)
+
+ def _test_create_instance(self, flavor=2):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ self.body['server']['imageRef'] = image_uuid
+ self.body['server']['flavorRef'] = flavor
+ self.req.body = jsonutils.dumps(self.body)
+ server = self.controller.create(self.req, self.body).obj['server']
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_private_flavor(self):
+ values = {
+ 'name': 'fake_name',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 10,
+ 'ephemeral_gb': 10,
+ 'flavorid': '1324',
+ 'swap': 0,
+ 'rxtx_factor': 0.5,
+ 'vcpu_weight': 1,
+ 'disabled': False,
+ 'is_public': False,
+ }
+ db.flavor_create(context.get_admin_context(), values)
+ self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
+ flavor=1324)
+
+ def test_create_server_bad_image_href(self):
+ image_href = 1
+ self.body['server']['imageRef'] = image_href,
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+ def test_create_server_with_invalid_networks_parameter(self):
+ self.ext_mgr.extensions = {'os-networks': 'fake'}
+ self.body['server']['networks'] = {
+ 'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_server_with_deleted_image(self):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # Get the fake image service so we can set the status to deleted
+ (image_service, image_id) = glance.get_remote_image_service(
+ context, '')
+ image_service.update(context, image_uuid, {'status': 'DELETED'})
+ self.addCleanup(image_service.update, context, image_uuid,
+ {'status': 'active'})
+
+ self.body['server']['flavorRef'] = 2
+ self.req.body = jsonutils.dumps(self.body)
+ with testtools.ExpectedException(
+ webob.exc.HTTPBadRequest,
+ 'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
+ self.controller.create(self.req, self.body)
+
+ def test_create_server_image_too_large(self):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # Get the fake image service so we can set the status to deleted
+ (image_service, image_id) = glance.get_remote_image_service(context,
+ image_uuid)
+ image = image_service.show(context, image_id)
+ orig_size = image['size']
+ new_size = str(1000 * (1024 ** 3))
+ image_service.update(context, image_uuid, {'size': new_size})
+
+ self.addCleanup(image_service.update, context, image_uuid,
+ {'size': orig_size})
+
+ self.body['server']['flavorRef'] = 2
+ self.req.body = jsonutils.dumps(self.body)
+ with testtools.ExpectedException(
+ webob.exc.HTTPBadRequest,
+ "Flavor's disk is too small for requested image."):
+ self.controller.create(self.req, self.body)
+
+ def test_create_instance_invalid_negative_min(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['min_count'] = -1
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_instance_invalid_negative_max(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['max_count'] = -1
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_instance_invalid_alpha_min(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['min_count'] = 'abcd',
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_instance_invalid_alpha_max(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['max_count'] = 'abcd',
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_multiple_instances(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_len(res["server"])
+
+ def test_create_multiple_instances_pass_disabled(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.flags(enable_instance_password=False)
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_missing(res["server"])
+
+ def test_create_multiple_instances_resv_id_return(self):
+ """Test creating multiple instances with asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['return_reservation_id'] = True
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body)
+ reservation_id = res.obj.get('reservation_id')
+ self.assertNotEqual(reservation_id, "")
+ self.assertIsNotNone(reservation_id)
+ self.assertTrue(len(reservation_id) > 1)
+
+ def test_create_multiple_instances_with_multiple_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested with a list of block device mappings for volumes.
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'},
+ {'device_name': 'foo2', 'volume_id': 'vol-yyyy'}
+ ]
+ params = {
+ 'block_device_mapping': bdm,
+ 'min_count': min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(len(kwargs['block_device_mapping']), 2)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+
+ def test_create_multiple_instances_with_single_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested to boot from a single volume.
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}]
+ params = {
+ 'block_device_mapping': bdm,
+ 'min_count': min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['block_device_mapping']['volume_id'],
+ 'vol-xxxx')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+
+ def test_create_multiple_instance_with_non_integer_max_count(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['max_count'] = 2.5
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_multiple_instance_with_non_integer_min_count(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['min_count'] = 2.5
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_image_ref_is_bookmark(self):
+ image_href = 'http://localhost/fake/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_image_ref_is_invalid(self):
+ image_uuid = 'this_is_not_a_valid_uuid'
+ image_href = 'http://localhost/fake/images/%s' % image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance()
+
+ def _test_create_extra(self, params, no_image=False):
+ self.body['server']['flavorRef'] = 2
+ if no_image:
+ self.body['server'].pop('imageRef', None)
+ self.body['server'].update(params)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertIn('server',
+ self.controller.create(self.req, self.body).obj)
+
+ def test_create_instance_with_security_group_enabled(self):
+ self.ext_mgr.extensions = {'os-security-groups': 'fake'}
+ group = 'foo'
+ old_create = compute_api.API.create
+
+ def sec_group_get(ctx, proj, name):
+ if name == group:
+ return True
+ else:
+ raise exception.SecurityGroupNotFoundForProject(
+ project_id=proj, security_group_id=name)
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['security_group'], [group])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(db, 'security_group_get_by_name', sec_group_get)
+ # negative test
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra,
+ {'security_groups': [{'name': 'bogus'}]})
+ # positive test - extra assert in create path
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra({'security_groups': [{'name': group}]})
+
+ def test_create_instance_with_non_unique_secgroup_name(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks,
+ 'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NoUniqueMatch("No Unique match found for ...")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_port_with_no_fixed_ips(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port_id}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortRequiresFixedIP(port_id=port_id)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_raise_user_data_too_large(self, mock_create):
+ mock_create.side_effect = exception.InstanceUserDataTooLarge(
+ maxsize=1, length=2)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_raise_auto_disk_config_exc(self, mock_create):
+ mock_create.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+ @mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InstanceExists(
+ name='instance-name'))
+ def test_create_instance_raise_instance_exists(self, mock_create):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_with_network_with_no_subnet(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkRequiresSubnet(network_uuid=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_access_ip(self):
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_access_ip_pass_disabled(self):
+        # test with admin passwords disabled; see LP bug 921814
+ self.flags(enable_instance_password=False)
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_bad_format_access_ip_v4(self):
+ self.body['server']['accessIPv4'] = 'bad_format'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_bad_format_access_ip_v6(self):
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'bad_format'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_name_all_blank_spaces(self):
+ self.body['server']['name'] = ' ' * 64
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_name_too_long(self):
+ self.body['server']['name'] = 'X' * 256
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance(self):
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
+ def test_create_instance_numa_topology_wrong(self, numa_constraints_mock):
+ numa_constraints_mock.side_effect = (
+ exception.ImageNUMATopologyIncomplete)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_too_much_metadata(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata']['vote'] = 'fiddletown'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_too_long(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {('a' * 260): '12345'}
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_value_too_long(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'key1': ('a' * 260)}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_blank(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'': 'abcd'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_not_dict(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = 'string'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_not_string(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {1: 'test'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_value_not_string(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'test': ['a', 'list']}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_user_data_malformed_bad_request(self):
+ self.ext_mgr.extensions = {'os-user-data': 'fake'}
+ params = {'user_data': 'u1234!'}
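+        # 'u1234!' is not valid base64, so the API rejects it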
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch('nova.compute.api.API.create',
+ side_effect=exception.KeypairNotFound(name='nonexistentkey',
+ user_id=1))
+ def test_create_instance_invalid_key_name(self, mock_create):
+ self.body['server']['key_name'] = 'nonexistentkey'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_valid_key_name(self):
+ self.body['server']['key_name'] = 'key'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_len(res["server"])
+
+ def test_create_instance_invalid_flavor_href(self):
+ flavor_ref = 'http://localhost/v2/flavors/asdf'
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_invalid_flavor_id_int(self):
+ flavor_ref = -1
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_bad_flavor_href(self):
+ flavor_ref = 'http://localhost/v2/flavors/17'
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_with_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.body['server']['config_drive'] = "true"
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.body['server']['config_drive'] = 'adcd'
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_without_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_config_drive_disabled(self):
+ config_drive = [{'config_drive': 'foo'}]
+ params = {'config_drive': config_drive}
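+        # with the os-config-drive extension disabled the value is dropped
+        # before reaching compute_api.create()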
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['config_drive'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_bad_href(self):
+ image_href = 'asdf'
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_local_href(self):
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_admin_pass(self):
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertEqual(server['adminPass'], self.body['server']['adminPass'])
+
+ def test_create_instance_admin_pass_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertIn('adminPass', self.body['server'])
+ self.assertNotIn('adminPass', server)
+
+ def test_create_instance_admin_pass_empty(self):
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = ''
+ self.req.body = jsonutils.dumps(self.body)
+
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, self.body)
+
+ def test_create_instance_with_security_group_disabled(self):
+ group = 'foo'
+ params = {'security_groups': [{'name': group}]}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ # NOTE(vish): if the security groups extension is not
+ # enabled, then security groups passed in
+ # are ignored.
+ self.assertEqual(kwargs['security_group'], ['default'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_disk_config_enabled(self):
+ self.ext_mgr.extensions = {'OS-DCF': 'fake'}
+ # NOTE(vish): the extension converts OS-DCF:disk_config into
+ # auto_disk_config, so we are testing with
+        #             the internal value.
+ params = {'auto_disk_config': 'AUTO'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['auto_disk_config'], 'AUTO')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_disk_config_disabled(self):
+ params = {'auto_disk_config': True}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['auto_disk_config'], False)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_scheduler_hints_enabled(self):
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake'}
+ hints = {'a': 'b'}
+ params = {'scheduler_hints': hints}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], hints)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_scheduler_hints_disabled(self):
+ hints = {'a': 'b'}
+ params = {'scheduler_hints': hints}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {})
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_volumes_enabled_no_image(self):
+ """Test that the create will fail if there is no image
+ and no bdms supplied in the request
+ """
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {}, no_image=True)
+
+ def test_create_instance_with_bdm_v2_enabled_no_image(self):
+ self.ext_mgr.extensions = {'os-block-device-mapping-v2-boot': 'fake'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {}, no_image=True)
+
+ def test_create_instance_with_user_data_enabled(self):
+ self.ext_mgr.extensions = {'os-user-data': 'fake'}
+ user_data = 'fake'
+ params = {'user_data': user_data}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['user_data'], user_data)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_user_data_disabled(self):
+ user_data = 'fake'
+ params = {'user_data': user_data}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['user_data'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_keypairs_enabled(self):
+ self.ext_mgr.extensions = {'os-keypairs': 'fake'}
+ key_name = 'green'
+
+ params = {'key_name': key_name}
+ old_create = compute_api.API.create
+
+ # NOTE(sdague): key pair goes back to the database,
+ # so we need to stub it out for tests
+ def key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ public_key='FAKE_KEY',
+ fingerprint='FAKE_FINGERPRINT',
+ name=name)
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['key_name'], key_name)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_keypairs_disabled(self):
+ key_name = 'green'
+
+ params = {'key_name': key_name}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['key_name'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_availability_zone_enabled(self):
+ self.ext_mgr.extensions = {'os-availability-zone': 'fake'}
+ availability_zone = 'fake'
+ params = {'availability_zone': availability_zone}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['availability_zone'], availability_zone)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
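+        # The first attempt may fail because no host is mapped to the
+        # requested availability zone yet; register a compute service
+        # and an aggregate for that zone, then retry the create.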
+ try:
+ self._test_create_extra(params)
+ except webob.exc.HTTPBadRequest as e:
+ expected = 'The requested availability zone is not available'
+ self.assertEqual(e.explanation, expected)
+ admin_context = context.get_admin_context()
+ db.service_create(admin_context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(admin_context,
+ {'name': 'agg1'}, {'availability_zone': availability_zone})
+ db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
+ self._test_create_extra(params)
+
+ def test_create_instance_with_availability_zone_disabled(self):
+ availability_zone = 'fake'
+ params = {'availability_zone': availability_zone}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_enabled(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ max_count = 3
+ params = {
+ 'min_count': min_count,
+ 'max_count': max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_disabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ 'min_count': min_count,
+ 'max_count': max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 1)
+ self.assertEqual(kwargs['max_count'], 1)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_networks_enabled(self):
+ self.ext_mgr.extensions = {'os-networks': 'fake'}
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
+ self.assertEqual(result, kwargs['requested_networks'].as_tuples())
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_neutronv2_port_in_use(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortInUse(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+    def test_create_instance_with_neutronv2_not_found_network(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkNotFound(network_id=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_neutronv2_port_not_found(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortNotFound(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_multiple_instance_with_specified_ip_neutronv2(self,
+ _api_mock):
+ _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
+ reason="")
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ address = '10.0.0.1'
+ self.body['server']['max_count'] = 2
+ requested_networks = [{'uuid': network, 'fixed_ip': address,
+ 'port': port}]
+ params = {'networks': requested_networks}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_multiple_instance_with_neutronv2_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.body['server']['max_count'] = 2
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ msg = _("Unable to launch multiple instances with"
+ " a single configured port ID. Please launch your"
+ " instance one by one with different ports.")
+ raise exception.MultiplePortsNotApplicable(reason=msg)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_networks_disabled_neutronv2(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
+ None, None)]
+ self.assertEqual(result, kwargs['requested_networks'].as_tuples())
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_networks_disabled(self):
+ self.ext_mgr.extensions = {}
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['requested_networks'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_invalid_personality(self):
+
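+        # Simulate the compute API failing to decode the personality file
+        # contents; the controller should translate this into a 400.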
+ def fake_create(*args, **kwargs):
+ codec = 'utf8'
+ content = 'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA=='
+ start_position = 19
+ end_position = 20
+ msg = 'invalid start byte'
+ raise UnicodeDecodeError(codec, content, start_position,
+ end_position, msg)
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.body['server']['personality'] = [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
+ },
+ ]
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_location(self):
+ selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ robj = self.controller.create(self.req, self.body)
+ self.assertEqual(robj['Location'], selfhref)
+
+ def _do_test_create_instance_above_quota(self, resource, allowed, quota,
+ expected_msg):
+ fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
+ self.body['server']['flavorRef'] = 3
+ self.req.body = jsonutils.dumps(self.body)
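+        # The stubbed quota leaves less headroom than the request needs,
+        # so the create must fail with HTTPForbidden and the expected
+        # over-quota message.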
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_instances(self):
+ msg = _('Quota exceeded for instances: Requested 1, but'
+ ' already used 10 of 10 instances')
+ self._do_test_create_instance_above_quota('instances', 0, 10, msg)
+
+ def test_create_instance_above_quota_ram(self):
+ msg = _('Quota exceeded for ram: Requested 4096, but'
+ ' already used 8192 of 10240 ram')
+ self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
+
+ def test_create_instance_above_quota_cores(self):
+ msg = _('Quota exceeded for cores: Requested 2, but'
+ ' already used 9 of 10 cores')
+ self._do_test_create_instance_above_quota('cores', 1, 10, msg)
+
+ def test_create_instance_above_quota_group_members(self):
+ ctxt = context.get_admin_context()
+ fake_group = objects.InstanceGroup(ctxt)
+ fake_group.create()
+
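+        # Report the group as already holding 10 members and have the
+        # quota limit check reject any additional server_group_members.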
+ def fake_count(context, name, group, user_id):
+ self.assertEqual(name, "server_group_members")
+ self.assertEqual(group.uuid, fake_group.uuid)
+ self.assertEqual(user_id,
+ self.req.environ['nova.context'].user_id)
+ return 10
+
+ def fake_limit_check(context, **kwargs):
+ if 'server_group_members' in kwargs:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
+ self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
+ 'os-server-group-quotas': 'fake'}
+ self.body['server']['scheduler_hints'] = {'group': fake_group.uuid}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many servers in group"
+
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_server_groups(self):
+
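+        # Reject any reservation that includes server_groups to simulate
+        # exceeding the server group quota for the scheduler hint group.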
+        def fake_reserve(context, **deltas):
+ if 'server_groups' in deltas:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
+ 'os-server-group-quotas': 'fake'}
+ self.body['server']['scheduler_hints'] = {'group': 'fake-group'}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many server groups."
+
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+
+class ServersControllerCreateTestWithMock(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestWithMock, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ self.volume_id = 'fake'
+
+ self.body = {
+ 'server': {
+ 'min_count': 2,
+ 'name': 'server_test',
+ 'imageRef': self.image_uuid,
+ 'flavorRef': self.flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "MQ==",
+ },
+ ],
+ },
+ }
+
+ self.req = fakes.HTTPRequest.blank('/fake/servers')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def _test_create_extra(self, params, no_image=False):
+ self.body['server']['flavorRef'] = 2
+ if no_image:
+ self.body['server'].pop('imageRef', None)
+ self.body['server'].update(params)
+ self.req.body = jsonutils.dumps(self.body)
+ self.controller.create(self.req, self.body).obj['server']
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
+ create_mock):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.2.3'
+ requested_networks = [{'uuid': network, 'fixed_ip': address}]
+ params = {'networks': requested_networks}
+ create_mock.side_effect = exception.FixedIpAlreadyInUse(
+ address=address,
+ instance_uuid=network)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+ self.assertEqual(1, len(create_mock.call_args_list))
+
+ @mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InvalidVolume(reason='error'))
+ def test_create_instance_with_invalid_volume_error(self, create_mock):
+ # Tests that InvalidVolume is translated to a 400 error.
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {})
+
+
+class TestServerCreateRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerCreateRequestXMLDeserializer, self).setUp()
+ self.deserializer = servers.CreateDeserializer()
+
+ def test_minimal_request(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_alternate_namespace_prefix(self):
+ serial_request = """
+<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
+ </ns2:server>
+ """
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ 'metadata': {"hello": "world"},
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_scheduler_hints_and_alternate_namespace_prefix(self):
+ serial_request = """
+<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
+ <os:scheduler_hints
+ xmlns:os="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2">
+ <hypervisor>xen</hypervisor>
+ <near>eb999657-dd6b-464e-8713-95c532ac3b18</near>
+ </os:scheduler_hints>
+ </ns2:server>
+ """
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ 'OS-SCH-HNT:scheduler_hints': {
+ 'hypervisor': ['xen'],
+ 'near': ['eb999657-dd6b-464e-8713-95c532ac3b18']
+ },
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {
+ "hello": "world"
+ }
+ }
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_access_ipv4(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"
+ accessIPv4="1.2.3.4"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "accessIPv4": "1.2.3.4",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_access_ipv6(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"
+ accessIPv6="fead::1234"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "accessIPv6": "fead::1234",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_access_ip(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"
+ accessIPv4="1.2.3.4"
+ accessIPv6="fead::1234"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_admin_pass(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"
+ adminPass="1234"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "adminPass": "1234",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_image_link(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="http://localhost:8774/v2/images/2"
+ flavorRef="3"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "http://localhost:8774/v2/images/2",
+ "flavorRef": "3",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_flavor_link(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="http://localhost:8774/v2/flavors/3"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "http://localhost:8774/v2/flavors/3",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_empty_metadata_personality(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <metadata/>
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_multiple_metadata_items(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <metadata>
+ <meta key="one">two</meta>
+ <meta key="open">snack</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {"one": "two", "open": "snack"},
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_multiple_personality_files(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <personality>
+ <file path="/etc/banner.txt">MQ==</file>
+ <file path="/etc/hosts">Mg==</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "personality": [
+ {"path": "/etc/banner.txt", "contents": "MQ=="},
+ {"path": "/etc/hosts", "contents": "Mg=="},
+ ],
+ },
+ }
+ self.assertThat(request['body'], matchers.DictMatches(expected))
+
+ def test_spec_request(self):
+ image_bookmark_link = ("http://servers.api.openstack.org/1234/"
+ "images/52415800-8b69-11e0-9b19-734f6f006e54")
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ imageRef="%s"
+ flavorRef="52415800-8b69-11e0-9b19-734f1195ff37"
+ name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">Mg==</file>
+ </personality>
+</server>""" % (image_bookmark_link)
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": ("http://servers.api.openstack.org/1234/"
+ "images/52415800-8b69-11e0-9b19-734f6f006e54"),
+ "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
+ "metadata": {"My Server Name": "Apache1"},
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "Mg==",
+ },
+ ],
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_empty_networks(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_one_network(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1" fixed_ip="10.0.1.12"/>
+ </networks>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_two_networks(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1" fixed_ip="10.0.1.12"/>
+ <network uuid="2" fixed_ip="10.0.2.12"/>
+ </networks>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
+ {"uuid": "2", "fixed_ip": "10.0.2.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_second_network_node_ignored(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1" fixed_ip="10.0.1.12"/>
+ </networks>
+ <networks>
+ <network uuid="2" fixed_ip="10.0.2.12"/>
+ </networks>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_one_network_missing_id(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network fixed_ip="10.0.1.12"/>
+ </networks>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"fixed_ip": "10.0.1.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_one_network_missing_fixed_ip(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1"/>
+ </networks>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_one_network_empty_id(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="" fixed_ip="10.0.1.12"/>
+ </networks>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "", "fixed_ip": "10.0.1.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_one_network_empty_fixed_ip(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1" fixed_ip=""/>
+ </networks>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1", "fixed_ip": ""}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_networks_duplicate_ids(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <networks>
+ <network uuid="1" fixed_ip="10.0.1.12"/>
+ <network uuid="1" fixed_ip="10.0.2.12"/>
+ </networks>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
+ {"uuid": "1", "fixed_ip": "10.0.2.12"}],
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_availability_zone(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1"
+ availability_zone="some_zone:some_host">
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "availability_zone": "some_zone:some_host",
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_multiple_create_args(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1"
+ min_count="1" max_count="3" return_reservation_id="True">
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "min_count": "1",
+ "max_count": "3",
+ "return_reservation_id": True,
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_disk_config(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ name="new-server-test" imageRef="1" flavorRef="1"
+ OS-DCF:diskConfig="AUTO">
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "OS-DCF:diskConfig": "AUTO",
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_scheduler_hints(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ xmlns:OS-SCH-HNT=
+ "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <OS-SCH-HNT:scheduler_hints>
+ <different_host>
+ 7329b667-50c7-46a6-b913-cb2a09dfeee0
+ </different_host>
+ <different_host>
+ f31efb24-34d2-43e1-8b44-316052956a39
+ </different_host>
+ </OS-SCH-HNT:scheduler_hints>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "OS-SCH-HNT:scheduler_hints": {
+ "different_host": [
+ "7329b667-50c7-46a6-b913-cb2a09dfeee0",
+ "f31efb24-34d2-43e1-8b44-316052956a39",
+ ]
+ }
+ }}
+ self.assertEqual(request['body'], expected)
+
+ def test_request_with_config_drive(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="config_drive_test"
+ imageRef="1"
+ flavorRef="1"
+ config_drive="true"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server": {
+ "name": "config_drive_test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "config_drive": "true"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class TestServerActionRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerActionRequestXMLDeserializer, self).setUp()
+ self.deserializer = servers.ActionDeserializer()
+
+ def _generate_request(self, action, disk_cfg, ref):
+ return """
+<%(action)s xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ %(disk_config)s="MANUAL" %(ref)s="1"/>""" % (
+ {'action': action, 'disk_config': disk_cfg, 'ref': ref})
+
+ def _generate_expected(self, action, ref):
+ return {
+ "%s" % action: {
+ "%s" % ref: "1",
+ "OS-DCF:diskConfig": "MANUAL",
+ },
+ }
+
+ def test_rebuild_request(self):
+ serial_request = self._generate_request("rebuild", "OS-DCF:diskConfig",
+ "imageRef")
+ request = self.deserializer.deserialize(serial_request)
+ expected = self._generate_expected("rebuild", "imageRef")
+ self.assertEqual(request['body'], expected)
+
+ def test_rebuild_request_auto_disk_config_compat(self):
+ serial_request = self._generate_request("rebuild", "auto_disk_config",
+ "imageRef")
+ request = self.deserializer.deserialize(serial_request)
+ expected = self._generate_expected("rebuild", "imageRef")
+ self.assertEqual(request['body'], expected)
+
+ def test_resize_request(self):
+ serial_request = self._generate_request("resize", "OS-DCF:diskConfig",
+ "flavorRef")
+ request = self.deserializer.deserialize(serial_request)
+ expected = self._generate_expected("resize", "flavorRef")
+ self.assertEqual(request['body'], expected)
+
+ def test_resize_request_auto_disk_config_compat(self):
+ serial_request = self._generate_request("resize", "auto_disk_config",
+ "flavorRef")
+ request = self.deserializer.deserialize(serial_request)
+ expected = self._generate_expected("resize", "flavorRef")
+ self.assertEqual(request['body'], expected)
+
+
+class TestAddressesXMLSerialization(test.TestCase):
+
+ index_serializer = ips.AddressesTemplate()
+ show_serializer = ips.NetworkTemplate()
+
+ def _serializer_test_data(self):
+ return {
+ 'network_2': [
+ {'addr': '192.168.0.1', 'version': 4},
+ {'addr': 'fe80::beef', 'version': 6},
+ ],
+ }
+
+ def test_xml_declaration(self):
+ output = self.show_serializer.serialize(self._serializer_test_data())
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ output = self.show_serializer.serialize(self._serializer_test_data())
+ root = etree.XML(output)
+ network = self._serializer_test_data()['network_2']
+ self.assertEqual(str(root.get('id')), 'network_2')
+ ip_elems = root.findall('{0}ip'.format(NS))
+ for z, ip_elem in enumerate(ip_elems):
+ ip = network[z]
+ self.assertEqual(str(ip_elem.get('version')),
+ str(ip['version']))
+ self.assertEqual(str(ip_elem.get('addr')),
+ str(ip['addr']))
+
+ def test_index(self):
+ fixture = {
+ 'addresses': {
+ 'network_1': [
+ {'addr': '192.168.0.3', 'version': 4},
+ {'addr': '192.168.0.5', 'version': 4},
+ ],
+ 'network_2': [
+ {'addr': '192.168.0.1', 'version': 4},
+ {'addr': 'fe80::beef', 'version': 6},
+ ],
+ },
+ }
+ output = self.index_serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'addresses')
+ addresses_dict = fixture['addresses']
+ network_elems = root.findall('{0}network'.format(NS))
+ self.assertEqual(len(network_elems), 2)
+ for i, network_elem in enumerate(network_elems):
+ network = addresses_dict.items()[i]
+ self.assertEqual(str(network_elem.get('id')), str(network[0]))
+ ip_elems = network_elem.findall('{0}ip'.format(NS))
+ for z, ip_elem in enumerate(ip_elems):
+ ip = network[1][z]
+ self.assertEqual(str(ip_elem.get('version')),
+ str(ip['version']))
+ self.assertEqual(str(ip_elem.get('addr')),
+ str(ip['addr']))
+
+
+class ServersViewBuilderTest(test.TestCase):
+
+ image_bookmark = "http://localhost/fake/images/5"
+ flavor_bookmark = "http://localhost/fake/flavors/1"
+
+ def setUp(self):
+ super(ServersViewBuilderTest, self).setUp()
+ self.flags(use_ipv6=True)
+ db_inst = fakes.stub_instance(
+ id=1,
+ image_ref="5",
+ uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
+ display_name="test_server",
+ include_fake_metadata=False)
+
+ privates = ['172.19.0.1']
+ publics = ['192.168.0.3']
+ public6s = ['b33f::fdee:ddff:fecc:bbaa']
+
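+        # Stub the network API: return a fake public/private network
+        # layout and no floating IPs.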
+ def nw_info(*args, **kwargs):
+ return [(None, {'label': 'public',
+ 'ips': [dict(ip=ip) for ip in publics],
+ 'ip6s': [dict(ip=ip) for ip in public6s]}),
+ (None, {'label': 'private',
+ 'ips': [dict(ip=ip) for ip in privates]})]
+
+ def floaters(*args, **kwargs):
+ return []
+
+ fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+ fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
+ floaters)
+
+ self.uuid = db_inst['uuid']
+ self.view_builder = views.servers.ViewBuilder()
+ self.request = fakes.HTTPRequest.blank("/v2/fake")
+ self.request.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_instance.fake_instance_obj(
+ self.request.context,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
+ **db_inst)
+ self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
+ self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
+ self.expected_detailed_server = {
+ "server": {
+ "id": self.uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "hostId": '',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100'},
+ {'version': 6, 'addr': '2001:db8:0:1::1'}
+ ]
+ },
+ "metadata": {},
+ "links": [
+ {
+ "rel": "self",
+ "href": self.self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": self.bookmark_link,
+ },
+ ],
+ }
+ }
+
+ self.expected_server = {
+ "server": {
+ "id": self.uuid,
+ "name": "test_server",
+ "links": [
+ {
+ "rel": "self",
+ "href": self.self_link,
+ },
+ {
+ "rel": "bookmark",
+ "href": self.bookmark_link,
+ },
+ ],
+ }
+ }
+
+ def test_get_flavor_valid_flavor(self):
+ expected = {"id": "1",
+ "links": [{"rel": "bookmark",
+ "href": self.flavor_bookmark}]}
+ result = self.view_builder._get_flavor(self.request, self.instance)
+ self.assertEqual(result, expected)
+
+ def test_build_server(self):
+ output = self.view_builder.basic(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_server))
+
+ def test_build_server_with_project_id(self):
+
+ output = self.view_builder.basic(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_server))
+
+ def test_build_server_detail(self):
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_no_image(self):
+ self.instance["image_ref"] = ""
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertEqual(output['server']['image'], "")
+
+ def test_build_server_detail_with_fault(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context, self.uuid)
+
+ self.expected_detailed_server["server"]["status"] = "ERROR"
+ self.expected_detailed_server["server"]["fault"] = {
+ "code": 404,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "HTTPNotFound",
+ "details": "Stock details for test",
+ }
+ del self.expected_detailed_server["server"]["progress"]
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_detail_with_fault_that_has_been_deleted(self):
+ self.instance['deleted'] = 1
+ self.instance['vm_state'] = vm_states.ERROR
+ fault = fake_instance.fake_fault_obj(self.request.context,
+ self.uuid, code=500,
+ message="No valid host was found")
+ self.instance['fault'] = fault
+
+        # Deleted servers should have DELETED status regardless of vm_state
+ self.expected_detailed_server["server"]["status"] = "DELETED"
+ self.expected_detailed_server["server"]["fault"] = {
+ "code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "No valid host was found",
+ }
+ del self.expected_detailed_server["server"]["progress"]
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_detail_with_fault_no_details_not_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error"}
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error",
+ 'details': 'Stock details for test'}
+
+ self.request.environ['nova.context'].is_admin = True
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_no_details_admin(self):
+ self.instance['vm_state'] = vm_states.ERROR
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context,
+ self.uuid,
+ code=500,
+ message='Error',
+ details='')
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "Error"}
+
+ self.request.environ['nova.context'].is_admin = True
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
+ def test_build_server_detail_with_fault_but_active(self):
+ self.instance['vm_state'] = vm_states.ACTIVE
+ self.instance['progress'] = 100
+ self.instance['fault'] = fake_instance.fake_fault_obj(
+ self.request.context, self.uuid)
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertNotIn('fault', output['server'])
+
+ def test_build_server_detail_active_status(self):
+        # set the vm_state of the instance to ACTIVE
+ self.instance['vm_state'] = vm_states.ACTIVE
+ self.instance['progress'] = 100
+
+ self.expected_detailed_server["server"]["status"] = "ACTIVE"
+ self.expected_detailed_server["server"]["progress"] = 100
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_detail_with_accessipv4(self):
+
+ access_ip_v4 = '1.2.3.4'
+ self.instance['access_ip_v4'] = access_ip_v4
+
+ self.expected_detailed_server["server"]["accessIPv4"] = access_ip_v4
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_detail_with_accessipv6(self):
+
+ access_ip_v6 = 'fead::1234'
+ self.instance['access_ip_v6'] = access_ip_v6
+
+ self.expected_detailed_server["server"]["accessIPv6"] = access_ip_v6
+
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+ def test_build_server_detail_with_metadata(self):
+
+ metadata = []
+ metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
+ metadata = nova_utils.metadata_to_dict(metadata)
+ self.instance['metadata'] = metadata
+
+ self.expected_detailed_server["server"]["metadata"] = {"Open": "Stack"}
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
+
+class ServerXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_HREF = 'http://localhost/v2/servers/%s' % FAKE_UUID
+ SERVER_NEXT = 'http://localhost/v2/servers?limit=%s&marker=%s'
+ SERVER_BOOKMARK = 'http://localhost/servers/%s' % FAKE_UUID
+ IMAGE_BOOKMARK = 'http://localhost/images/5'
+ FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
+ USERS_ATTRIBUTES = ['name', 'id', 'created', 'accessIPv4',
+ 'updated', 'progress', 'status', 'hostId',
+ 'accessIPv6']
+ ADMINS_ATTRIBUTES = USERS_ATTRIBUTES + ['adminPass']
+
+ def setUp(self):
+ super(ServerXMLSerializationTest, self).setUp()
+ self.body = {
+ "server": {
+ 'id': FAKE_UUID,
+ 'user_id': 'fake_user_id',
+ 'tenant_id': 'fake_tenant_id',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server-" + u'\u89e3\u7801',
+ "status": "BUILD",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.IMAGE_BOOKMARK,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.FLAVOR_BOOKMARK,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ }
+ }
+
+ def _validate_xml(self, root, server_dict):
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = server_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ image_root = root.find('{0}image'.format(NS))
+ self.assertEqual(image_root.get('id'), server_dict['image']['id'])
+ link_nodes = image_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['image']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ flavor_root = root.find('{0}flavor'.format(NS))
+ self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
+ link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['flavor']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ addresses_root = root.find('{0}addresses'.format(NS))
+ addresses_dict = server_dict['addresses']
+ network_elems = addresses_root.findall('{0}network'.format(NS))
+ self.assertEqual(len(network_elems), 2)
+ for i, network_elem in enumerate(network_elems):
+ network = addresses_dict.items()[i]
+ self.assertEqual(str(network_elem.get('id')), str(network[0]))
+ ip_elems = network_elem.findall('{0}ip'.format(NS))
+ for z, ip_elem in enumerate(ip_elems):
+ ip = network[1][z]
+ self.assertEqual(str(ip_elem.get('version')),
+ str(ip['version']))
+ self.assertEqual(str(ip_elem.get('addr')),
+ str(ip['addr']))
+
+ def _validate_required_attributes(self, root, server_dict, attributes):
+ for key in attributes:
+ expected = server_dict[key]
+ if not isinstance(expected, six.text_type):
+ expected = str(expected)
+ self.assertEqual(expected, root.get(key))
+
+ def test_xml_declaration(self):
+ serializer = servers.ServerTemplate()
+
+ output = serializer.serialize(self.body)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = servers.ServerTemplate()
+
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.USERS_ATTRIBUTES)
+ self._validate_xml(root, server_dict)
+
+ def test_create(self):
+ serializer = servers.FullServerTemplate()
+
+ self.body["server"]["adminPass"] = "test_password"
+
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.ADMINS_ATTRIBUTES)
+ self._validate_xml(root, server_dict)
+
+ def test_index(self):
+ serializer = servers.MinimalServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "name": "test_server",
+ 'links': [
+ {
+ 'href': expected_server_href,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "name": "test_server_2",
+ 'links': [
+ {
+ 'href': expected_server_href_2,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark_2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ for key in ['name', 'id']:
+ self.assertEqual(server_elem.get(key), str(server_dict[key]))
+
+ link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_with_servers_links(self):
+ serializer = servers.MinimalServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_next = self.SERVER_NEXT % (2, 2)
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "name": "test_server",
+ 'links': [
+ {
+ 'href': expected_server_href,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "name": "test_server_2",
+ 'links': [
+ {
+ 'href': expected_server_href_2,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark_2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ],
+ "servers_links": [
+ {
+ 'rel': 'next',
+ 'href': expected_server_next,
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ for key in ['name', 'id']:
+ self.assertEqual(server_elem.get(key), str(server_dict[key]))
+
+ link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ # Check servers_links
+ servers_links = root.findall('{0}link'.format(ATOMNS))
+ for i, link in enumerate(fixture['servers_links']):
+ for key, value in link.items():
+ self.assertEqual(servers_links[i].get(key), value)
+
+ def test_detail(self):
+ serializer = servers.ServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "user_id": "fake",
+ "tenant_id": "fake",
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ "links": [
+ {
+ "href": expected_server_href,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "user_id": 'fake',
+ "tenant_id": 'fake',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 100,
+ "name": "test_server_2",
+ "status": "ACTIVE",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "2",
+ },
+ "links": [
+ {
+ "href": expected_server_href_2,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark_2,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ self._validate_required_attributes(server_elem, server_dict,
+ self.USERS_ATTRIBUTES)
+ self._validate_xml(server_elem, server_dict)
+
+ def test_update(self):
+ serializer = servers.ServerTemplate()
+
+ self.body["server"]["fault"] = {
+ "code": 500,
+ "created": self.TIMESTAMP,
+ "message": "Error Message",
+ "details": "Fault details",
+ }
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.USERS_ATTRIBUTES)
+
+ self._validate_xml(root, server_dict)
+ fault_root = root.find('{0}fault'.format(NS))
+ fault_dict = server_dict['fault']
+ self.assertEqual(fault_root.get("code"), str(fault_dict["code"]))
+ self.assertEqual(fault_root.get("created"), fault_dict["created"])
+ msg_elem = fault_root.find('{0}message'.format(NS))
+ self.assertEqual(msg_elem.text, fault_dict["message"])
+ det_elem = fault_root.find('{0}details'.format(NS))
+ self.assertEqual(det_elem.text, fault_dict["details"])
+
+ def test_action(self):
+ serializer = servers.FullServerTemplate()
+
+ self.body["server"]["adminPass"] = u'\u89e3\u7801'
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.ADMINS_ATTRIBUTES)
+
+ self._validate_xml(root, server_dict)
+
+
+class ServersAllExtensionsTestCase(test.TestCase):
+ """Servers tests using default API router with all extensions enabled.
+
+ The intent here is to catch cases where extensions end up throwing
+ an exception because of a malformed request before the core API
+ gets a chance to validate the request and return a 422 response.
+
+ For example, ServerDiskConfigController extends servers.Controller::
+
+ | @wsgi.extends
+ | def create(self, req, body):
+ | if 'server' in body:
+ | self._set_disk_config(body['server'])
+ | resp_obj = (yield)
+ | self._show(req, resp_obj)
+
+    We want to ensure that the extension does not choke on an invalid
+    body.
+ """
+
+ def setUp(self):
+ super(ServersAllExtensionsTestCase, self).setUp()
+ self.app = compute.APIRouter()
+
+ def test_create_missing_server(self):
+ # Test create with malformed body.
+
+ def fake_create(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(422, res.status_int)
+
+ def test_update_missing_server(self):
+ # Test update with malformed body.
+
+ def fake_update(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/1')
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(422, res.status_int)
+
+
+class ServersUnprocessableEntityTestCase(test.TestCase):
+ """Tests of places we throw 422 Unprocessable Entity from."""
+
+ def setUp(self):
+ super(ServersUnprocessableEntityTestCase, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ def _unprocessable_server_create(self, body):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, body)
+
+ def test_create_server_no_body(self):
+ self._unprocessable_server_create(body=None)
+
+ def test_create_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_server_create(body=body)
+
+ def test_create_server_malformed_entity(self):
+ body = {'server': 'string'}
+ self._unprocessable_server_create(body=body)
+
+ def _unprocessable_server_update(self, body):
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update, req, FAKE_UUID, body)
+
+ def test_update_server_no_body(self):
+ self._unprocessable_server_update(body=None)
+
+ def test_update_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_server_update(body=body)
+
+    def test_update_server_malformed_entity(self):
+ body = {'server': 'string'}
+ self._unprocessable_server_update(body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_urlmap.py b/nova/tests/unit/api/openstack/compute/test_urlmap.py
new file mode 100644
index 0000000000..c95cb95d2c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_urlmap.py
@@ -0,0 +1,171 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.tests.unit.image.fake
+
+
+class UrlmapTest(test.NoDBTestCase):
+ def setUp(self):
+ super(UrlmapTest, self).setUp()
+ fakes.stub_out_rate_limiting(self.stubs)
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+
+ def tearDown(self):
+ super(UrlmapTest, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_path_version_v1_1(self):
+ # Test URL path specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/v1.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_content_type_version_v1_1(self):
+ # Test Content-Type specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=1.1"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_accept_version_v1_1(self):
+ # Test Accept header specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=1.1"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_path_version_v2(self):
+ # Test URL path specifying v2 returns v2 content.
+ req = webob.Request.blank('/v2/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_content_type_version_v2(self):
+ # Test Content-Type specifying v2 returns v2 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=2"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_accept_version_v2(self):
+ # Test Accept header specifying v2 returns v2 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=2"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_path_content_type(self):
+ # Test URL path specifying JSON returns JSON content.
+ url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app(init_only=('images',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['image']['id'],
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+
+ def test_accept_content_type(self):
+ # Test Accept header specifying JSON returns JSON content.
+ url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml;q=0.8, application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('images',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['image']['id'],
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+
+ def test_path_version_v21(self):
+ # Test URL path specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/v2.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_content_type_version_v21(self):
+        # Test Content-Type specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=2.1"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_accept_version_v21(self):
+ # Test Accept header specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=2.1"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_path_content_type_v21(self):
+ # Test URL path specifying JSON returns JSON content.
+ url = '/v2.1/fake/extensions/extensions.json'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['extension']['name'], 'Extensions')
+
+ def test_accept_content_type_v21(self):
+ # Test Accept header specifying JSON returns JSON content.
+ url = '/v2.1/fake/extensions/extensions'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml;q=0.8, application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('extensions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['extension']['name'], 'Extensions')
diff --git a/nova/tests/unit/api/openstack/compute/test_v21_extensions.py b/nova/tests/unit/api/openstack/compute/test_v21_extensions.py
new file mode 100644
index 0000000000..7998dc82e5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_v21_extensions.py
@@ -0,0 +1,196 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+import stevedore
+import webob.exc
+
+from nova.api import openstack
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack import extensions
+from nova import exception
+from nova import test
+
+CONF = cfg.CONF
+
+
+class fake_bad_extension(object):
+ name = "fake_bad_extension"
+ alias = "fake-bad"
+
+
+class fake_stevedore_enabled_extensions(object):
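+    """Minimal stand-in for stevedore.enabled.EnabledExtensionManager
+    that reports no loaded extensions.
+    """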
+ def __init__(self, namespace, check_func, invoke_on_load=False,
+ invoke_args=(), invoke_kwds=None):
+ self.extensions = []
+
+ def map(self, func, *args, **kwds):
+ pass
+
+ def __iter__(self):
+ return iter(self.extensions)
+
+
+class fake_loaded_extension_info(object):
+ def __init__(self):
+ self.extensions = {}
+
+ def register_extension(self, ext):
+ self.extensions[ext] = ext
+ return True
+
+ def get_extensions(self):
+ return {'core1': None, 'core2': None, 'noncore1': None}
+
+
+class ExtensionLoadingTestCase(test.NoDBTestCase):
+
+ def _set_v21_core(self, core_extensions):
+ openstack.API_V3_CORE_EXTENSIONS = core_extensions
+
+ def test_extensions_loaded(self):
+ app = compute.APIRouterV21()
+ self.assertIn('servers', app._loaded_extension_info.extensions)
+
+ def test_check_bad_extension(self):
+ extension_info = plugins.LoadedExtensionInfo()
+ self.assertFalse(extension_info._check_extension(fake_bad_extension))
+
+ def test_extensions_blacklist(self):
+ app = compute.APIRouterV21()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
+ app = compute.APIRouterV21()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_extensions_whitelist_accept(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v21_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v21_core, v21_core)
+
+ app = compute.APIRouterV21()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
+ 'osapi_v3')
+ app = compute.APIRouterV21()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_extensions_whitelist_block(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v21_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v21_core, v21_core)
+
+ app = compute.APIRouterV21()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
+ app = compute.APIRouterV21()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_blacklist_overrides_whitelist(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v21_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v21_core, v21_core)
+
+ app = compute.APIRouterV21()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
+ 'osapi_v3')
+ CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
+ app = compute.APIRouterV21()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+ self.assertIn('servers', app._loaded_extension_info.extensions)
+ self.assertEqual(1, len(app._loaded_extension_info.extensions))
+
+ def test_get_missing_core_extensions(self):
+ v21_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
+ self.addCleanup(self._set_v21_core, v21_core)
+ self.assertEqual(0, len(
+ compute.APIRouterV21.get_missing_core_extensions(
+ ['core1', 'core2', 'noncore1'])))
+ missing_core = compute.APIRouterV21.get_missing_core_extensions(
+ ['core1'])
+ self.assertEqual(1, len(missing_core))
+ self.assertIn('core2', missing_core)
+ missing_core = compute.APIRouterV21.get_missing_core_extensions([])
+ self.assertEqual(2, len(missing_core))
+ self.assertIn('core1', missing_core)
+ self.assertIn('core2', missing_core)
+ missing_core = compute.APIRouterV21.get_missing_core_extensions(
+ ['noncore1'])
+ self.assertEqual(2, len(missing_core))
+ self.assertIn('core1', missing_core)
+ self.assertIn('core2', missing_core)
+
+ def test_core_extensions_present(self):
+ self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
+ fake_stevedore_enabled_extensions)
+ self.stubs.Set(plugins, 'LoadedExtensionInfo',
+ fake_loaded_extension_info)
+ v21_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
+ self.addCleanup(self._set_v21_core, v21_core)
+ # if no core API extensions are missing then an exception will
+ # not be raised when creating an instance of compute.APIRouterV21
+ compute.APIRouterV21()
+
+ def test_core_extensions_missing(self):
+ self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
+ fake_stevedore_enabled_extensions)
+ self.stubs.Set(plugins, 'LoadedExtensionInfo',
+ fake_loaded_extension_info)
+ self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV21)
+
+ def test_extensions_expected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_expected_error_from_list(self):
+ @extensions.expected_errors((404, 403))
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_unexpected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_error_from_list(self):
+ @extensions.expected_errors((404, 413))
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_policy_not_authorized_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise exception.PolicyNotAuthorized(action="foo")
+
+ self.assertRaises(exception.PolicyNotAuthorized, fake_func)
diff --git a/nova/tests/unit/api/openstack/compute/test_v3_auth.py b/nova/tests/unit/api/openstack/compute/test_v3_auth.py
new file mode 100644
index 0000000000..e728fa89d6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_v3_auth.py
@@ -0,0 +1,62 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNoAuthMiddlewareV3(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNoAuthMiddlewareV3, self).setUp()
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_networking(self.stubs)
+
+ def test_authorize_user(self):
+ req = webob.Request.blank('/v2/fake')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/fake")
+
+ def test_authorize_user_trailing_slash(self):
+ # make sure it works with trailing slash on the request
+ req = webob.Request.blank('/v2/fake/')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/fake")
+
+ def test_auth_token_no_empty_headers(self):
+ req = webob.Request.blank('/v2/fake')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertNotIn('X-CDN-Management-Url', result.headers)
+ self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/unit/api/openstack/compute/test_v3_extensions.py b/nova/tests/unit/api/openstack/compute/test_v3_extensions.py
new file mode 100644
index 0000000000..da6aa43d7f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_v3_extensions.py
@@ -0,0 +1,194 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+import stevedore
+import webob.exc
+
+from nova.api import openstack
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack import extensions
+from nova import exception
+from nova import test
+
+CONF = cfg.CONF
+
+
+class fake_bad_extension(object):
+ name = "fake_bad_extension"
+ alias = "fake-bad"
+
+
+class fake_stevedore_enabled_extensions(object):
+ def __init__(self, namespace, check_func, invoke_on_load=False,
+ invoke_args=(), invoke_kwds=None):
+ self.extensions = []
+
+ def map(self, func, *args, **kwds):
+ pass
+
+ def __iter__(self):
+ return iter(self.extensions)
+
+
+class fake_loaded_extension_info(object):
+ def __init__(self):
+ self.extensions = {}
+
+ def register_extension(self, ext):
+ self.extensions[ext] = ext
+ return True
+
+ def get_extensions(self):
+ return {'core1': None, 'core2': None, 'noncore1': None}
+
+
+class ExtensionLoadingTestCase(test.NoDBTestCase):
+
+ def _set_v3_core(self, core_extensions):
+ openstack.API_V3_CORE_EXTENSIONS = core_extensions
+
+ def test_extensions_loaded(self):
+ app = compute.APIRouterV3()
+ self.assertIn('servers', app._loaded_extension_info.extensions)
+
+ def test_check_bad_extension(self):
+ extension_info = plugins.LoadedExtensionInfo()
+ self.assertFalse(extension_info._check_extension(fake_bad_extension))
+
+ def test_extensions_blacklist(self):
+ app = compute.APIRouterV3()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
+ app = compute.APIRouterV3()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_extensions_whitelist_accept(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v3_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v3_core, v3_core)
+
+ app = compute.APIRouterV3()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
+ 'osapi_v3')
+ app = compute.APIRouterV3()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_extensions_whitelist_block(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v3_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v3_core, v3_core)
+
+ app = compute.APIRouterV3()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
+ app = compute.APIRouterV3()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+
+ def test_blacklist_overrides_whitelist(self):
+        # NOTE(maurosr): just to avoid an exception being raised for not
+        # loading all of the core API extensions.
+ v3_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
+ self.addCleanup(self._set_v3_core, v3_core)
+
+ app = compute.APIRouterV3()
+ self.assertIn('os-hosts', app._loaded_extension_info.extensions)
+ CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
+ 'osapi_v3')
+ CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
+ app = compute.APIRouterV3()
+ self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
+ self.assertIn('servers', app._loaded_extension_info.extensions)
+ self.assertEqual(len(app._loaded_extension_info.extensions), 1)
+
+ def test_get_missing_core_extensions(self):
+ v3_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
+ self.addCleanup(self._set_v3_core, v3_core)
+ self.assertEqual(len(compute.APIRouterV3.get_missing_core_extensions(
+ ['core1', 'core2', 'noncore1'])), 0)
+ missing_core = compute.APIRouterV3.get_missing_core_extensions(
+ ['core1'])
+ self.assertEqual(len(missing_core), 1)
+ self.assertIn('core2', missing_core)
+ missing_core = compute.APIRouterV3.get_missing_core_extensions([])
+ self.assertEqual(len(missing_core), 2)
+ self.assertIn('core1', missing_core)
+ self.assertIn('core2', missing_core)
+ missing_core = compute.APIRouterV3.get_missing_core_extensions(
+ ['noncore1'])
+ self.assertEqual(len(missing_core), 2)
+ self.assertIn('core1', missing_core)
+ self.assertIn('core2', missing_core)
+
+ def test_core_extensions_present(self):
+ self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
+ fake_stevedore_enabled_extensions)
+ self.stubs.Set(plugins, 'LoadedExtensionInfo',
+ fake_loaded_extension_info)
+ v3_core = openstack.API_V3_CORE_EXTENSIONS
+ openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
+ self.addCleanup(self._set_v3_core, v3_core)
+ # if no core API extensions are missing then an exception will
+ # not be raised when creating an instance of compute.APIRouterV3
+ compute.APIRouterV3()
+
+ def test_core_extensions_missing(self):
+ self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
+ fake_stevedore_enabled_extensions)
+ self.stubs.Set(plugins, 'LoadedExtensionInfo',
+ fake_loaded_extension_info)
+ self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV3)
+
+ def test_extensions_expected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_expected_error_from_list(self):
+ @extensions.expected_errors((404, 403))
+ def fake_func():
+ raise webob.exc.HTTPNotFound()
+
+ self.assertRaises(webob.exc.HTTPNotFound, fake_func)
+
+ def test_extensions_unexpected_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_error_from_list(self):
+ @extensions.expected_errors((404, 413))
+ def fake_func():
+ raise webob.exc.HTTPConflict()
+
+ self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
+
+ def test_extensions_unexpected_policy_not_authorized_error(self):
+ @extensions.expected_errors(404)
+ def fake_func():
+ raise exception.PolicyNotAuthorized(action="foo")
+
+ self.assertRaises(exception.PolicyNotAuthorized, fake_func)
diff --git a/nova/tests/unit/api/openstack/compute/test_versions.py b/nova/tests/unit/api/openstack/compute/test_versions.py
new file mode 100644
index 0000000000..fabd15e01c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_versions.py
@@ -0,0 +1,797 @@
+# Copyright 2010-2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid as stdlib_uuid
+
+import feedparser
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import versions
+from nova.api.openstack.compute import views
+from nova.api.openstack import xmlutil
+from nova import test
+from nova.tests.unit.api.openstack import common
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/common/api/v1.0'
+}
+
+
+EXP_LINKS = {
+ 'v2.0': {
+ 'html': 'http://docs.openstack.org/',
+ },
+ 'v2.1': {
+ 'html': 'http://docs.openstack.org/'
+ },
+}
+
+
+EXP_VERSIONS = {
+ "v2.0": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.0']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute+xml;version=2",
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json;version=2",
+ },
+ ],
+ },
+ "v2.1": {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "updated": "2013-07-23T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2.1/",
+ },
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.1']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json;version=2.1",
+ }
+ ],
+ }
+}
+
+
+class VersionsTestV20(test.NoDBTestCase):
+
+ def test_get_version_list(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ versions = jsonutils.loads(res.body)["versions"]
+ expected = [
+ {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ }],
+ },
+ {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "updated": "2013-07-23T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ }],
+ },
+ ]
+ self.assertEqual(versions, expected)
+
+ def test_get_version_list_302(self):
+ req = webob.Request.blank('/v2')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 302)
+ redirect_req = webob.Request.blank('/v2/')
+ self.assertEqual(res.location, redirect_req.url)
+
+ def _test_get_version_2_detail(self, url, accept=None):
+ if accept is None:
+ accept = "application/json"
+ req = webob.Request.blank(url)
+ req.accept = accept
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {
+ "version": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ },
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.0']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/"
+ "vnd.openstack.compute+xml;version=2",
+ },
+ {
+ "base": "application/json",
+ "type": "application/"
+ "vnd.openstack.compute+json;version=2",
+ },
+ ],
+ },
+ }
+ self.assertEqual(expected, version)
+
+ def test_get_version_2_detail(self):
+ self._test_get_version_2_detail('/v2/')
+
+ def test_get_version_2_detail_content_type(self):
+ accept = "application/json;version=2"
+ self._test_get_version_2_detail('/', accept=accept)
+
+ def test_get_version_2_versions_invalid(self):
+ req = webob.Request.blank('/v2/versions/1234')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_get_version_2_detail_xml(self):
+ req = webob.Request.blank('/v2/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+
+ version = etree.XML(res.body)
+ xmlutil.validate_schema(version, 'version')
+
+ expected = EXP_VERSIONS['v2.0']
+ self.assertTrue(version.xpath('/ns:version', namespaces=NS))
+ media_types = version.xpath('ns:media-types/ns:media-type',
+ namespaces=NS)
+ self.assertTrue(common.compare_media_types(media_types,
+ expected['media-types']))
+ for key in ['id', 'status', 'updated']:
+ self.assertEqual(version.get(key), expected[key])
+ links = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(links,
+ [{'rel': 'self', 'href': 'http://localhost/v2/'}]
+ + expected['links']))
+
+ def test_get_version_list_xml(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+
+ root = etree.XML(res.body)
+ xmlutil.validate_schema(root, 'versions')
+
+ self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
+ versions = root.xpath('ns:version', namespaces=NS)
+ self.assertEqual(len(versions), 2)
+
+ for i, v in enumerate(['v2.0', 'v2.1']):
+ version = versions[i]
+ expected = EXP_VERSIONS[v]
+ for key in ['id', 'status', 'updated']:
+ self.assertEqual(version.get(key), expected[key])
+ (link,) = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(link,
+ [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
+
+ def test_get_version_2_detail_atom(self):
+ req = webob.Request.blank('/v2/')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual("application/atom+xml", res.content_type)
+
+ xmlutil.validate_schema(etree.XML(res.body), 'atom')
+
+ f = feedparser.parse(res.body)
+ self.assertEqual(f.feed.title, 'About This Version')
+ self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
+ self.assertEqual(f.feed.id, 'http://localhost/v2/')
+ self.assertEqual(f.feed.author, 'Rackspace')
+ self.assertEqual(f.feed.author_detail.href,
+ 'http://www.rackspace.com/')
+ self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(f.feed.links[0]['rel'], 'self')
+
+ self.assertEqual(len(f.entries), 1)
+ entry = f.entries[0]
+ self.assertEqual(entry.id, 'http://localhost/v2/')
+ self.assertEqual(entry.title, 'Version v2.0')
+ self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
+ self.assertEqual(len(entry.content), 1)
+ self.assertEqual(entry.content[0].value,
+ 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
+ self.assertEqual(len(entry.links), 2)
+ self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(entry.links[0]['rel'], 'self')
+ self.assertEqual(entry.links[1], {
+ 'href': EXP_LINKS['v2.0']['html'],
+ 'type': 'text/html',
+ 'rel': 'describedby'})
+
+ def test_get_version_list_atom(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/atom+xml")
+
+ f = feedparser.parse(res.body)
+ self.assertEqual(f.feed.title, 'Available API Versions')
+ self.assertEqual(f.feed.updated, '2013-07-23T11:33:21Z')
+ self.assertEqual(f.feed.id, 'http://localhost/')
+ self.assertEqual(f.feed.author, 'Rackspace')
+ self.assertEqual(f.feed.author_detail.href,
+ 'http://www.rackspace.com/')
+ self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
+ self.assertEqual(f.feed.links[0]['rel'], 'self')
+
+ self.assertEqual(len(f.entries), 2)
+ entry = f.entries[0]
+ self.assertEqual(entry.id, 'http://localhost/v2/')
+ self.assertEqual(entry.title, 'Version v2.0')
+ self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
+ self.assertEqual(len(entry.content), 1)
+ self.assertEqual(entry.content[0].value,
+ 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
+ self.assertEqual(len(entry.links), 1)
+ self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(entry.links[0]['rel'], 'self')
+
+ entry = f.entries[1]
+ self.assertEqual(entry.id, 'http://localhost/v2/')
+ self.assertEqual(entry.title, 'Version v2.1')
+ self.assertEqual(entry.updated, '2013-07-23T11:33:21Z')
+ self.assertEqual(len(entry.content), 1)
+ self.assertEqual(entry.content[0].value,
+ 'Version v2.1 EXPERIMENTAL (2013-07-23T11:33:21Z)')
+ self.assertEqual(len(entry.links), 1)
+ self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(entry.links[0]['rel'], 'self')
+
+ def test_multi_choice_image(self):
+ req = webob.Request.blank('/images/1')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ expected = {
+ "choices": [
+ {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "links": [
+ {
+ "href": "http://localhost/v2/images/1",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute+xml"
+ ";version=2"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json"
+ ";version=2"
+ },
+ ],
+ },
+ {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "links": [
+ {
+ "href": "http://localhost/v2/images/1",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type":
+ "application/vnd.openstack.compute+json;version=2.1",
+ }
+ ],
+ },
+ ], }
+
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
+
+ def test_multi_choice_image_xml(self):
+ req = webob.Request.blank('/images/1')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/xml")
+
+ root = etree.XML(res.body)
+ self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
+ versions = root.xpath('ns:version', namespaces=NS)
+ self.assertEqual(len(versions), 2)
+
+ version = versions[0]
+ self.assertEqual(version.get('id'), 'v2.0')
+ self.assertEqual(version.get('status'), 'CURRENT')
+ media_types = version.xpath('ns:media-types/ns:media-type',
+ namespaces=NS)
+        self.assertTrue(common.compare_media_types(
+            media_types, EXP_VERSIONS['v2.0']['media-types']))
+
+ links = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(links,
+ [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
+
+ version = versions[1]
+ self.assertEqual(version.get('id'), 'v2.1')
+ self.assertEqual(version.get('status'), 'EXPERIMENTAL')
+ media_types = version.xpath('ns:media-types/ns:media-type',
+ namespaces=NS)
+        self.assertTrue(common.compare_media_types(
+            media_types, EXP_VERSIONS['v2.1']['media-types']))
+
+ links = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(links,
+ [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
+
+ def test_multi_choice_server_atom(self):
+ """Make sure multi choice responses do not have content-type
+ application/atom+xml (should use default of json)
+ """
+ req = webob.Request.blank('/servers')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ def test_multi_choice_server(self):
+ uuid = str(stdlib_uuid.uuid4())
+ req = webob.Request.blank('/servers/' + uuid)
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ expected = {
+ "choices": [
+ {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "links": [
+ {
+ "href": "http://localhost/v2/servers/" + uuid,
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute+xml"
+ ";version=2"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json"
+ ";version=2"
+ },
+ ],
+ },
+ {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "links": [
+ {
+ "href": "http://localhost/v2/servers/" + uuid,
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type":
+ "application/vnd.openstack.compute+json;version=2.1",
+ }
+ ],
+ },
+ ], }
+
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
+
+
+class VersionsViewBuilderTests(test.NoDBTestCase):
+ def test_view_builder(self):
+ base_url = "http://example.org/"
+
+ version_data = {
+ "v3.2.1": {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "updated": "2011-07-18T11:30:00Z",
+ }
+ }
+
+ expected = {
+ "versions": [
+ {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "updated": "2011-07-18T11:30:00Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://example.org/v2/",
+ },
+ ],
+ }
+ ]
+ }
+
+ builder = views.versions.ViewBuilder(base_url)
+ output = builder.build_versions(version_data)
+
+ self.assertEqual(output, expected)
+
+ def test_generate_href(self):
+ base_url = "http://example.org/app/"
+
+ expected = "http://example.org/app/v2/"
+
+ builder = views.versions.ViewBuilder(base_url)
+ actual = builder.generate_href('v2')
+
+ self.assertEqual(actual, expected)
+
+ def test_generate_href_v21(self):
+ base_url = "http://example.org/app/"
+
+ expected = "http://example.org/app/v2/"
+
+ builder = views.versions.ViewBuilder(base_url)
+ actual = builder.generate_href('v2.1')
+
+ self.assertEqual(actual, expected)
+
+ def test_generate_href_unknown(self):
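+        # An unrecognized version string falls back to the default v2 href.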
+ base_url = "http://example.org/app/"
+
+ expected = "http://example.org/app/v2/"
+
+ builder = views.versions.ViewBuilder(base_url)
+ actual = builder.generate_href('foo')
+
+ self.assertEqual(actual, expected)
+
+
+class VersionsSerializerTests(test.NoDBTestCase):
+ def test_versions_list_xml_serializer(self):
+ versions_data = {
+ 'versions': [
+ {
+ "id": "2.7",
+ "updated": "2011-07-18T11:30:00Z",
+ "status": "DEPRECATED",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://test/v2",
+ },
+ ],
+ },
+ ]
+ }
+
+ serializer = versions.VersionsTemplate()
+ response = serializer.serialize(versions_data)
+
+ root = etree.XML(response)
+ xmlutil.validate_schema(root, 'versions')
+
+ self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
+ version_elems = root.xpath('ns:version', namespaces=NS)
+ self.assertEqual(len(version_elems), 1)
+ version = version_elems[0]
+ self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
+ self.assertEqual(version.get('status'),
+ versions_data['versions'][0]['status'])
+
+ (link,) = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(link, [{
+ 'rel': 'self',
+ 'href': 'http://test/v2',
+ 'type': 'application/atom+xml'}]))
+
+ def test_versions_multi_xml_serializer(self):
+ versions_data = {
+ 'choices': [
+ {
+ "id": "2.7",
+ "updated": "2011-07-18T11:30:00Z",
+ "status": "DEPRECATED",
+ "media-types": EXP_VERSIONS['v2.0']['media-types'],
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://test/v2/images",
+ },
+ ],
+ },
+ ]
+ }
+
+ serializer = versions.ChoicesTemplate()
+ response = serializer.serialize(versions_data)
+
+ root = etree.XML(response)
+ self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
+ (version,) = root.xpath('ns:version', namespaces=NS)
+ self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
+ self.assertEqual(version.get('status'),
+ versions_data['choices'][0]['status'])
+
+ media_types = list(version)[0]
+ self.assertEqual(media_types.tag.split('}')[1], "media-types")
+
+ media_types = version.xpath('ns:media-types/ns:media-type',
+ namespaces=NS)
+ self.assertTrue(common.compare_media_types(media_types,
+ versions_data['choices'][0]['media-types']))
+
+ (link,) = version.xpath('atom:link', namespaces=NS)
+ self.assertTrue(common.compare_links(link,
+ versions_data['choices'][0]['links']))
+
+ def test_versions_list_atom_serializer(self):
+ versions_data = {
+ 'versions': [
+ {
+ "id": "2.9.8",
+ "updated": "2011-07-20T11:40:00Z",
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://test/2.9.8",
+ },
+ ],
+ },
+ ]
+ }
+
+ serializer = versions.VersionsAtomSerializer()
+ response = serializer.serialize(versions_data)
+ f = feedparser.parse(response)
+
+ self.assertEqual(f.feed.title, 'Available API Versions')
+ self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
+ self.assertEqual(f.feed.id, 'http://test/')
+ self.assertEqual(f.feed.author, 'Rackspace')
+ self.assertEqual(f.feed.author_detail.href,
+ 'http://www.rackspace.com/')
+ self.assertEqual(f.feed.links[0]['href'], 'http://test/')
+ self.assertEqual(f.feed.links[0]['rel'], 'self')
+
+ self.assertEqual(len(f.entries), 1)
+ entry = f.entries[0]
+ self.assertEqual(entry.id, 'http://test/2.9.8')
+ self.assertEqual(entry.title, 'Version 2.9.8')
+ self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
+ self.assertEqual(len(entry.content), 1)
+ self.assertEqual(entry.content[0].value,
+ 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
+ self.assertEqual(len(entry.links), 1)
+ self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
+ self.assertEqual(entry.links[0]['rel'], 'self')
+
+ def test_version_detail_atom_serializer(self):
+ versions_data = {
+ "version": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ },
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.0']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute+xml"
+ ";version=2",
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json"
+ ";version=2",
+ }
+ ],
+ },
+ }
+
+ serializer = versions.VersionAtomSerializer()
+ response = serializer.serialize(versions_data)
+ f = feedparser.parse(response)
+
+ self.assertEqual(f.feed.title, 'About This Version')
+ self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
+ self.assertEqual(f.feed.id, 'http://localhost/v2/')
+ self.assertEqual(f.feed.author, 'Rackspace')
+ self.assertEqual(f.feed.author_detail.href,
+ 'http://www.rackspace.com/')
+ self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(f.feed.links[0]['rel'], 'self')
+
+ self.assertEqual(len(f.entries), 1)
+ entry = f.entries[0]
+ self.assertEqual(entry.id, 'http://localhost/v2/')
+ self.assertEqual(entry.title, 'Version v2.0')
+ self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
+ self.assertEqual(len(entry.content), 1)
+ self.assertEqual(entry.content[0].value,
+ 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
+ self.assertEqual(len(entry.links), 2)
+ self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
+ self.assertEqual(entry.links[0]['rel'], 'self')
+ self.assertEqual(entry.links[1], {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': EXP_LINKS['v2.0']['html']})
+
+ def test_multi_choice_image_with_body(self):
+ req = webob.Request.blank('/images/1')
+ req.accept = "application/json"
+ req.method = 'POST'
+ req.content_type = "application/json"
+ req.body = "{\"foo\": \"bar\"}"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(300, res.status_int)
+ self.assertEqual("application/json", res.content_type)
+
+ def test_get_version_list_with_body(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json"
+ req.method = 'POST'
+ req.content_type = "application/json"
+ req.body = "{\"foo\": \"bar\"}"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ self.assertEqual("application/json", res.content_type)
+
+
+# NOTE(oomichi): The v2.0 version API now covers "/" (the root path),
+# so this class tests only "/v2.1" for the v2.1 API.
+class VersionsTestV21(test.NoDBTestCase):
+ exp_versions = copy.deepcopy(EXP_VERSIONS)
+ exp_versions['v2.0']['links'].insert(0,
+ {'href': 'http://localhost/v2.1/', 'rel': 'self'},
+ )
+
+ def test_get_version_list_302(self):
+ req = webob.Request.blank('/v2.1')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 302)
+ redirect_req = webob.Request.blank('/v2.1/')
+ self.assertEqual(res.location, redirect_req.url)
+
+ def test_get_version_21_detail(self):
+ req = webob.Request.blank('/v2.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {"version": self.exp_versions['v2.1']}
+ self.assertEqual(expected, version)
+
+ def test_get_version_21_versions_v21_detail(self):
+ req = webob.Request.blank('/v2.1/fake/versions/v2.1')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {"version": self.exp_versions['v2.1']}
+ self.assertEqual(expected, version)
+
+ def test_get_version_21_versions_v20_detail(self):
+ req = webob.Request.blank('/v2.1/fake/versions/v2.0')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {"version": self.exp_versions['v2.0']}
+ self.assertEqual(expected, version)
+
+ def test_get_version_21_versions_invalid(self):
+ req = webob.Request.blank('/v2.1/versions/1234')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_version_21_detail_content_type(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=2.1"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {"version": self.exp_versions['v2.1']}
+ self.assertEqual(expected, version)
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
new file mode 100644
index 0000000000..34c072a634
--- /dev/null
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -0,0 +1,662 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import routes
+import six
+import webob
+import webob.dec
+import webob.request
+
+from nova.api import auth as api_auth
+from nova.api import openstack as openstack_api
+from nova.api.openstack import auth
+from nova.api.openstack import compute
+from nova.api.openstack.compute import limits
+from nova.api.openstack.compute import versions
+from nova.api.openstack import urlmap
+from nova.api.openstack import wsgi as os_wsgi
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova.db.sqlalchemy import models
+from nova import exception as exc
+import nova.netconf
+from nova.network import api as network_api
+from nova import quota
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_network
+from nova.tests.unit.objects import test_keypair
+from nova import utils
+from nova import wsgi
+
+
+QUOTAS = quota.QUOTAS
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUIDS = {}
+
+
+class Context(object):
+ pass
+
+
+class FakeRouter(wsgi.Router):
+ def __init__(self, ext_mgr=None):
+ pass
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ res = webob.Response()
+ res.status = '200'
+ res.headers['X-Test-Success'] = 'True'
+ return res
+
+
+@webob.dec.wsgify
+def fake_wsgi(self, req):
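+    # Pass-through used to replace middleware __call__ methods in tests:
+    # the request is handed straight to the wrapped application.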
+ return self.application
+
+
+def wsgi_app(inner_app_v2=None, fake_auth_context=None,
+ use_no_auth=False, ext_mgr=None, init_only=None):
+ if not inner_app_v2:
+ inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
+
+ if use_no_auth:
+ api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
+ limits.RateLimitingMiddleware(inner_app_v2)))
+ else:
+ if fake_auth_context is not None:
+ ctxt = fake_auth_context
+ else:
+ ctxt = context.RequestContext('fake', 'fake', auth_token=True)
+ api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
+ limits.RateLimitingMiddleware(inner_app_v2)))
+
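+    # Both the /v2 and legacy /v1.1 prefixes are served by the same v2
+    # application; the root path serves the version discovery document.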
+ mapper = urlmap.URLMap()
+ mapper['/v2'] = api_v2
+ mapper['/v1.1'] = api_v2
+ mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
+ return mapper
+
+
+def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
+ use_no_auth=False, ext_mgr=None, init_only=None):
+ if not inner_app_v21:
+ inner_app_v21 = compute.APIRouterV21(init_only)
+
+ if use_no_auth:
+ api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
+ limits.RateLimitingMiddleware(inner_app_v21)))
+ else:
+ if fake_auth_context is not None:
+ ctxt = fake_auth_context
+ else:
+ ctxt = context.RequestContext('fake', 'fake', auth_token=True)
+ api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
+ limits.RateLimitingMiddleware(inner_app_v21)))
+
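+    # Both /v2 and /v2.1 are routed to the same v2.1 application here.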
+ mapper = urlmap.URLMap()
+ mapper['/v2'] = api_v21
+ mapper['/v2.1'] = api_v21
+ return mapper
+
+
+def stub_out_key_pair_funcs(stubs, have_key_pair=True):
+ def key_pair(context, user_id):
+ return [dict(test_keypair.fake_keypair,
+ name='key', public_key='public_key')]
+
+ def one_key_pair(context, user_id, name):
+ if name == 'key':
+ return dict(test_keypair.fake_keypair,
+ name='key', public_key='public_key')
+ else:
+ raise exc.KeypairNotFound(user_id=user_id, name=name)
+
+ def no_key_pair(context, user_id):
+ return []
+
+ if have_key_pair:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+ stubs.Set(nova.db, 'key_pair_get', one_key_pair)
+ else:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
+
+
+def stub_out_rate_limiting(stubs):
+ def fake_rate_init(self, app):
+ super(limits.RateLimitingMiddleware, self).__init__(app)
+ self.application = app
+
+ stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
+ '__init__', fake_rate_init)
+
+ stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
+ '__call__', fake_wsgi)
+
+
+def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
+ def fake_reserve(context, **deltas):
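+        # Emulate a failed quota reservation: when more of 'resource' is
+        # requested than 'allowed', fabricate usages whose remaining
+        # headroom equals 'allowed' and raise OverQuota.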
+ requested = deltas.pop(resource, 0)
+ if requested > allowed:
+ quotas = dict(instances=1, cores=1, ram=1)
+ quotas[resource] = quota
+ usages = dict(instances=dict(in_use=0, reserved=0),
+ cores=dict(in_use=0, reserved=0),
+ ram=dict(in_use=0, reserved=0))
+ usages[resource]['in_use'] = (quotas[resource] * 0.9 -
+ allowed)
+ usages[resource]['reserved'] = quotas[resource] * 0.1
+ headroom = dict(
+ (res, value - (usages[res]['in_use'] + usages[res]['reserved']))
+ for res, value in quotas.iteritems()
+ )
+ raise exc.OverQuota(overs=[resource], quotas=quotas,
+ usages=usages, headroom=headroom)
+ stubs.Set(QUOTAS, 'reserve', fake_reserve)
+
+
+def stub_out_networking(stubs):
+ def get_my_ip():
+ return '127.0.0.1'
+ stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
+
+
+def stub_out_compute_api_snapshot(stubs):
+
+ def snapshot(self, context, instance, name, extra_properties=None):
+ # emulate glance rejecting image names which are too long
+ if len(name) > 256:
+ raise exc.Invalid
+ return dict(id='123', status='ACTIVE', name=name,
+ properties=extra_properties)
+
+ stubs.Set(compute_api.API, 'snapshot', snapshot)
+
+
+class stub_out_compute_api_backup(object):
+
+ def __init__(self, stubs):
+ self.stubs = stubs
+ self.extra_props_last_call = None
+ stubs.Set(compute_api.API, 'backup', self.backup)
+
+ def backup(self, context, instance, name, backup_type, rotation,
+ extra_properties=None):
+ self.extra_props_last_call = extra_properties
+ props = dict(backup_type=backup_type,
+ rotation=rotation)
+ props.update(extra_properties or {})
+ return dict(id='123', status='ACTIVE', name=name, properties=props)
+
+
+def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
+ fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
+
+
+def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
+ def get_floating_ips_by_fixed_address(self, context, fixed_ip):
+ return ['1.2.3.4']
+
+ if func is None:
+ func = get_floating_ips_by_fixed_address
+ stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address', func)
+
+
+def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
+ if not private:
+ private = '192.168.0.3'
+ if not publics:
+ publics = ['1.2.3.4']
+
+ class Fake:
+ def get_instance_nw_info(*args, **kwargs):
+ pass
+
+ def get_floating_ips_by_fixed_address(*args, **kwargs):
+ return publics
+
+ def validate_networks(self, context, networks, max_count):
+ return max_count
+
+ def create_pci_requests_for_sriov_ports(self, context,
+ system_metadata,
+ requested_networks):
+ pass
+
+ if cls is None:
+ cls = Fake
+ stubs.Set(network_api, 'API', cls)
+ fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
+
+
+class FakeToken(object):
+ id_count = 0
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __init__(self, **kwargs):
+ FakeToken.id_count += 1
+ self.id = FakeToken.id_count
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+
+
+class FakeRequestContext(context.RequestContext):
+ def __init__(self, *args, **kwargs):
+ kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
+ return super(FakeRequestContext, self).__init__(*args, **kwargs)
+
+
+class HTTPRequest(os_wsgi.Request):
+
+ @staticmethod
+ def blank(*args, **kwargs):
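+        # Build a request against the v2 API and attach a fake
+        # RequestContext (optionally admin) to the WSGI environment.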
+ kwargs['base_url'] = 'http://localhost/v2'
+ use_admin_context = kwargs.pop('use_admin_context', False)
+ out = os_wsgi.Request.blank(*args, **kwargs)
+ out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
+ is_admin=use_admin_context)
+ return out
+
+
+class HTTPRequestV3(os_wsgi.Request):
+
+ @staticmethod
+ def blank(*args, **kwargs):
+ kwargs['base_url'] = 'http://localhost/v3'
+ use_admin_context = kwargs.pop('use_admin_context', False)
+ out = os_wsgi.Request.blank(*args, **kwargs)
+ out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
+ is_admin=use_admin_context)
+ return out
+
+
+class TestRouter(wsgi.Router):
+ def __init__(self, controller, mapper=None):
+ if not mapper:
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests",
+ controller=os_wsgi.Resource(controller))
+ super(TestRouter, self).__init__(mapper)
+
+
+class FakeAuthDatabase(object):
+ data = {}
+
+ @staticmethod
+ def auth_token_get(context, token_hash):
+ return FakeAuthDatabase.data.get(token_hash, None)
+
+ @staticmethod
+ def auth_token_create(context, token):
+ fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
+ FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+ FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
+ return fake_token
+
+ @staticmethod
+ def auth_token_destroy(context, token_id):
+ token = FakeAuthDatabase.data.get('id_%i' % token_id)
+ if token and token.token_hash in FakeAuthDatabase.data:
+ del FakeAuthDatabase.data[token.token_hash]
+ del FakeAuthDatabase.data['id_%i' % token_id]
+
+
+class FakeRateLimiter(object):
+ def __init__(self, application):
+ self.application = application
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ return self.application
+
+
+def create_info_cache(nw_cache):
+ if nw_cache is None:
+ pub0 = ('192.168.1.100',)
+ pub1 = ('2001:db8:0:1::1',)
+
+ def _ip(ip):
+ return {'address': ip, 'type': 'fixed'}
+
+ nw_cache = [
+ {'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'test1',
+ 'subnets': [{'cidr': '192.168.1.0/24',
+ 'ips': [_ip(ip) for ip in pub0]},
+ {'cidr': 'b33f::/64',
+ 'ips': [_ip(ip) for ip in pub1]}]}}]
+
+ if not isinstance(nw_cache, six.string_types):
+ nw_cache = jsonutils.dumps(nw_cache)
+
+ return {
+ "info_cache": {
+ "network_info": nw_cache,
+ "deleted": False,
+ "created_at": None,
+ "deleted_at": None,
+ "updated_at": None,
+ }
+ }
+
+
+def get_fake_uuid(token=0):
+ if token not in FAKE_UUIDS:
+ FAKE_UUIDS[token] = str(uuid.uuid4())
+ return FAKE_UUIDS[token]
+
+
+def fake_instance_get(**kwargs):
+ def _return_server(context, uuid, columns_to_join=None, use_slave=False):
+ return stub_instance(1, **kwargs)
+ return _return_server
+
+
+def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
+ raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
+def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
+ def _return_servers(context, *args, **kwargs):
+ servers_list = []
+ marker = None
+ limit = None
+ found_marker = False
+ if "marker" in kwargs:
+ marker = kwargs["marker"]
+ if "limit" in kwargs:
+ limit = kwargs["limit"]
+
+ if 'columns_to_join' in kwargs:
+ kwargs.pop('columns_to_join')
+
+ if 'use_slave' in kwargs:
+ kwargs.pop('use_slave')
+
+ for i in xrange(num_servers):
+ uuid = get_fake_uuid(i)
+ server = stub_instance(id=i + 1, uuid=uuid,
+ **kwargs)
+ servers_list.append(server)
+            if marker is not None and uuid == marker:
+                found_marker = True
+                # Reset the accumulated list so that only servers appearing
+                # after the marker are returned, mimicking marker-based
+                # pagination.
+                servers_list = []
+ if marker is not None and not found_marker:
+ raise exc.MarkerNotFound(marker=marker)
+ if limit is not None:
+ servers_list = servers_list[:limit]
+ return servers_list
+ return _return_servers
+
+
+def stub_instance(id, user_id=None, project_id=None, host=None,
+ node=None, vm_state=None, task_state=None,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", name=None, key_name='',
+ access_ipv4=None, access_ipv6=None, progress=0,
+ auto_disk_config=False, display_name=None,
+ include_fake_metadata=True, config_drive=None,
+ power_state=None, nw_cache=None, metadata=None,
+ security_groups=None, root_device_name=None,
+ limit=None, marker=None,
+ launched_at=timeutils.utcnow(),
+ terminated_at=timeutils.utcnow(),
+ availability_zone='', locked_by=None, cleaned=False,
+ memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0):
+ if user_id is None:
+ user_id = 'fake_user'
+ if project_id is None:
+ project_id = 'fake_project'
+
+ if metadata:
+ metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
+ elif include_fake_metadata:
+ metadata = [models.InstanceMetadata(key='seq', value=str(id))]
+ else:
+ metadata = []
+
+ inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
+ sys_meta = flavors.save_flavor_info({}, inst_type)
+
+ if host is not None:
+ host = str(host)
+
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
+ if security_groups is None:
+ security_groups = [{"id": 1, "name": "test", "description": "Foo:",
+ "project_id": "project", "user_id": "user",
+ "created_at": None, "updated_at": None,
+ "deleted_at": None, "deleted": False}]
+
+    # ReservationID isn't sent back, so hack it into the server name.
+ server_name = name or "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
+ info_cache = create_info_cache(nw_cache)
+
+ instance = {
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
+ "deleted": None,
+ "user_id": user_id,
+ "project_id": project_id,
+ "image_ref": image_ref,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": key_name,
+ "key_data": key_data,
+ "config_drive": config_drive,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
+ "power_state": power_state,
+ "memory_mb": memory_mb,
+ "vcpus": vcpus,
+ "root_gb": root_gb,
+ "ephemeral_gb": ephemeral_gb,
+ "ephemeral_key_uuid": None,
+ "hostname": display_name or server_name,
+ "host": host,
+ "node": node,
+ "instance_type_id": 1,
+ "instance_type": inst_type,
+ "user_data": "",
+ "reservation_id": reservation_id,
+ "mac_address": "",
+ "scheduled_at": timeutils.utcnow(),
+ "launched_at": launched_at,
+ "terminated_at": terminated_at,
+ "availability_zone": availability_zone,
+ "display_name": display_name or server_name,
+ "display_description": "",
+ "locked": locked_by is not None,
+ "locked_by": locked_by,
+ "metadata": metadata,
+ "access_ip_v4": access_ipv4,
+ "access_ip_v6": access_ipv6,
+ "uuid": uuid,
+ "progress": progress,
+ "auto_disk_config": auto_disk_config,
+ "name": "instance-%s" % id,
+ "shutdown_terminate": True,
+ "disable_terminate": False,
+ "security_groups": security_groups,
+ "root_device_name": root_device_name,
+ "system_metadata": utils.dict_to_metadata(sys_meta),
+ "pci_devices": [],
+ "vm_mode": "",
+ "default_swap_device": "",
+ "default_ephemeral_device": "",
+ "launched_on": "",
+ "cell_name": "",
+ "architecture": "",
+ "os_type": "",
+ "cleaned": cleaned}
+
+ instance.update(info_cache)
+ instance['info_cache']['instance_uuid'] = instance['uuid']
+
+ return instance
+
+
+def stub_volume(id, **kwargs):
+ volume = {
+ 'id': id,
+ 'user_id': 'fakeuser',
+ 'project_id': 'fakeproject',
+ 'host': 'fakehost',
+ 'size': 1,
+ 'availability_zone': 'fakeaz',
+ 'instance_uuid': 'fakeuuid',
+ 'mountpoint': '/',
+ 'status': 'fakestatus',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': 'displayname',
+ 'display_description': 'displaydesc',
+ 'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'vol_type_name'}}
+
+ volume.update(kwargs)
+ return volume
+
+
+def stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ vol = stub_volume('1')
+ vol['size'] = size
+ vol['display_name'] = name
+ vol['display_description'] = description
+ try:
+ vol['snapshot_id'] = snapshot['id']
+ except (KeyError, TypeError):
+ vol['snapshot_id'] = None
+ vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
+ return vol
+
+
+def stub_volume_update(self, context, *args, **param):
+ pass
+
+
+def stub_volume_delete(self, context, *args, **param):
+ pass
+
+
+def stub_volume_get(self, context, volume_id):
+ return stub_volume(volume_id)
+
+
+def stub_volume_notfound(self, context, volume_id):
+ raise exc.VolumeNotFound(volume_id=volume_id)
+
+
+def stub_volume_get_all(context, search_opts=None):
+ return [stub_volume(100, project_id='fake'),
+ stub_volume(101, project_id='superfake'),
+ stub_volume(102, project_id='superduperfake')]
+
+
+def stub_volume_check_attach(self, context, *args, **param):
+ pass
+
+
+def stub_snapshot(id, **kwargs):
+ snapshot = {
+ 'id': id,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': timeutils.utcnow(),
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ 'project_id': 'fake'
+ }
+
+ snapshot.update(kwargs)
+ return snapshot
+
+
+def stub_snapshot_create(self, context, volume_id, name, description):
+ return stub_snapshot(100, volume_id=volume_id, display_name=name,
+ display_description=description)
+
+
+def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
+ return {'snapshot': {'id': 100, 'volumeId': volume_id}}
+
+
+def stub_snapshot_delete(self, context, snapshot_id):
+ if snapshot_id == '-1':
+ raise exc.NotFound
+
+
+def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
+ delete_info):
+ pass
+
+
+def stub_snapshot_get(self, context, snapshot_id):
+ if snapshot_id == '-1':
+ raise exc.NotFound
+ return stub_snapshot(snapshot_id)
+
+
+def stub_snapshot_get_all(self, context):
+ return [stub_snapshot(100, project_id='fake'),
+ stub_snapshot(101, project_id='superfake'),
+ stub_snapshot(102, project_id='superduperfake')]
+
+
+def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
+
+
+def fake_get_available_languages():
+ existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
+ return existing_translations
+
+
+def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
new file mode 100644
index 0000000000..a61f70cf95
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -0,0 +1,764 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suites for 'common' code used throughout the OpenStack HTTP API.
+"""
+
+import xml.dom.minidom as minidom
+
+from lxml import etree
+import mock
+import six
+from testtools import matchers
+import webob
+import webob.exc
+import webob.multidict
+
+from nova.api.openstack import common
+from nova.api.openstack import xmlutil
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import exception
+from nova import test
+from nova.tests.unit import utils
+
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+
+
+class LimiterTest(test.TestCase):
+ """Unit tests for the `nova.api.openstack.common.limited` method which
+ takes in a list of items and, depending on the 'offset' and 'limit' GET
+ params, returns a subset or complete set of the given items.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimiterTest, self).setUp()
+ self.tiny = range(1)
+ self.small = range(10)
+ self.medium = range(1000)
+ self.large = range(10000)
+
+ def test_limiter_offset_zero(self):
+ # Test offset key works with 0.
+ req = webob.Request.blank('/?offset=0')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_offset_medium(self):
+ # Test offset key works with a medium sized number.
+ req = webob.Request.blank('/?offset=10')
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), self.small[10:])
+ self.assertEqual(common.limited(self.medium, req), self.medium[10:])
+ self.assertEqual(common.limited(self.large, req), self.large[10:1010])
+
+ def test_limiter_offset_over_max(self):
+ # Test offset key works with a number over 1000 (max_limit).
+ req = webob.Request.blank('/?offset=1001')
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), [])
+ self.assertEqual(common.limited(self.medium, req), [])
+ self.assertEqual(
+ common.limited(self.large, req), self.large[1001:2001])
+
+ def test_limiter_offset_blank(self):
+ # Test offset key works with a blank offset.
+ req = webob.Request.blank('/?offset=')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_offset_bad(self):
+ # Test offset key works with a BAD offset.
+ req = webob.Request.blank(u'/?offset=\u0020aa')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_nothing(self):
+ # Test request with no offset or limit.
+ req = webob.Request.blank('/')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_zero(self):
+ # Test limit of zero.
+ req = webob.Request.blank('/?limit=0')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_medium(self):
+ # Test limit of 10.
+ req = webob.Request.blank('/?limit=10')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium[:10])
+ self.assertEqual(common.limited(self.large, req), self.large[:10])
+
+ def test_limiter_limit_over_max(self):
+ # Test limit of 3000.
+ req = webob.Request.blank('/?limit=3000')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_and_offset(self):
+ # Test request with both limit and offset.
+ items = range(2000)
+ req = webob.Request.blank('/?offset=1&limit=3')
+ self.assertEqual(common.limited(items, req), items[1:4])
+ req = webob.Request.blank('/?offset=3&limit=0')
+ self.assertEqual(common.limited(items, req), items[3:1003])
+ req = webob.Request.blank('/?offset=3&limit=1500')
+ self.assertEqual(common.limited(items, req), items[3:1003])
+ req = webob.Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(common.limited(items, req), [])
+
+ def test_limiter_custom_max_limit(self):
+ # Test a max_limit other than 1000.
+ items = range(2000)
+ req = webob.Request.blank('/?offset=1&limit=3')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[1:4])
+ req = webob.Request.blank('/?offset=3&limit=0')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
+ req = webob.Request.blank('/?offset=3&limit=2500')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
+ req = webob.Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(common.limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ # Test a negative limit.
+ req = webob.Request.blank('/?limit=-3000')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_negative_offset(self):
+ # Test a negative offset.
+ req = webob.Request.blank('/?offset=-30')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+
+class SortParamUtilsTest(test.TestCase):
+
+ def test_get_sort_params_defaults(self):
+ '''Verifies the default sort key and direction.'''
+ sort_keys, sort_dirs = common.get_sort_params({})
+ self.assertEqual(['created_at'], sort_keys)
+ self.assertEqual(['desc'], sort_dirs)
+
+ def test_get_sort_params_override_defaults(self):
+        '''Verifies that the defaults can be overridden.'''
+ sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
+ default_dir='dir1')
+ self.assertEqual(['key1'], sort_keys)
+ self.assertEqual(['dir1'], sort_dirs)
+
+ sort_keys, sort_dirs = common.get_sort_params({}, default_key=None,
+ default_dir=None)
+ self.assertEqual([], sort_keys)
+ self.assertEqual([], sort_dirs)
+
+ def test_get_sort_params_single_value(self):
+ '''Verifies a single sort key and direction.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ params.add('sort_dir', 'dir1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1'], sort_keys)
+ self.assertEqual(['dir1'], sort_dirs)
+
+ def test_get_sort_params_single_with_default(self):
+ '''Verifies a single sort value with a default.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1'], sort_keys)
+ # sort_key was supplied, sort_dir should be defaulted
+ self.assertEqual(['desc'], sort_dirs)
+
+ params = webob.multidict.MultiDict()
+ params.add('sort_dir', 'dir1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['created_at'], sort_keys)
+ # sort_dir was supplied, sort_key should be defaulted
+ self.assertEqual(['dir1'], sort_dirs)
+
+ def test_get_sort_params_multiple_values(self):
+ '''Verifies multiple sort parameter values.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ params.add('sort_key', 'key2')
+ params.add('sort_key', 'key3')
+ params.add('sort_dir', 'dir1')
+ params.add('sort_dir', 'dir2')
+ params.add('sort_dir', 'dir3')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
+ self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
+ # Also ensure that the input parameters are not modified
+ sort_key_vals = []
+ sort_dir_vals = []
+ while 'sort_key' in params:
+ sort_key_vals.append(params.pop('sort_key'))
+ while 'sort_dir' in params:
+ sort_dir_vals.append(params.pop('sort_dir'))
+ self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals)
+ self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals)
+ self.assertEqual(0, len(params))
+
+
+class PaginationParamsTest(test.TestCase):
+ """Unit tests for the `nova.api.openstack.common.get_pagination_params`
+ method which takes in a request object and returns 'marker' and 'limit'
+ GET params.
+ """
+
+ def test_no_params(self):
+ # Test no params.
+ req = webob.Request.blank('/')
+ self.assertEqual(common.get_pagination_params(req), {})
+
+ def test_valid_marker(self):
+ # Test valid marker param.
+ req = webob.Request.blank(
+ '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
+
+ def test_valid_limit(self):
+ # Test valid limit param.
+ req = webob.Request.blank('/?limit=10')
+ self.assertEqual(common.get_pagination_params(req), {'limit': 10})
+
+ def test_invalid_limit(self):
+ # Test invalid limit param.
+ req = webob.Request.blank('/?limit=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_marker(self):
+ # Test valid limit and marker parameters.
+ marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
+ req = webob.Request.blank('/?limit=20&marker=%s' % marker)
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': marker, 'limit': 20})
+
+ def test_valid_page_size(self):
+ # Test valid page_size param.
+ req = webob.Request.blank('/?page_size=10')
+ self.assertEqual(common.get_pagination_params(req),
+ {'page_size': 10})
+
+ def test_invalid_page_size(self):
+ # Test invalid page_size param.
+ req = webob.Request.blank('/?page_size=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_page_size(self):
+ # Test valid limit and page_size parameters.
+ req = webob.Request.blank('/?limit=20&page_size=5')
+ self.assertEqual(common.get_pagination_params(req),
+ {'page_size': 5, 'limit': 20})
+
+
+class MiscFunctionsTest(test.TestCase):
+
+ def test_remove_major_version_from_href(self):
+ fixture = 'http://www.testsite.com/v1/images'
+ expected = 'http://www.testsite.com/images'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href(self):
+ fixture = 'http://www.testsite.com/v1.1/images'
+ expected = 'http://www.testsite.com/images'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_2(self):
+ fixture = 'http://www.testsite.com/v1.1/'
+ expected = 'http://www.testsite.com/'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_3(self):
+ fixture = 'http://www.testsite.com/v10.10'
+ expected = 'http://www.testsite.com'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_4(self):
+ fixture = 'http://www.testsite.com/v1.1/images/v10.5'
+ expected = 'http://www.testsite.com/images/v10.5'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_bad_request(self):
+ fixture = 'http://www.testsite.com/1.1/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_2(self):
+ fixture = 'http://www.testsite.com/v/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_3(self):
+ fixture = 'http://www.testsite.com/v1.1images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_get_id_from_href_with_int_url(self):
+ fixture = 'http://www.testsite.com/dir/45'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_int(self):
+ fixture = '45'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_int_url_query(self):
+ fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid_url(self):
+ fixture = 'http://www.testsite.com/dir/abc123'
+ actual = common.get_id_from_href(fixture)
+ expected = "abc123"
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid_url_query(self):
+ fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
+ actual = common.get_id_from_href(fixture)
+ expected = "abc123"
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid(self):
+ fixture = 'abc123'
+ actual = common.get_id_from_href(fixture)
+ expected = 'abc123'
+ self.assertEqual(actual, expected)
+
+ def test_raise_http_conflict_for_instance_invalid_state(self):
+ exc = exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ try:
+ common.raise_http_conflict_for_instance_invalid_state(exc,
+ 'meow', 'fake_server_id')
+ except webob.exc.HTTPConflict as e:
+ self.assertEqual(six.text_type(e),
+ "Cannot 'meow' instance fake_server_id while it is in "
+ "fake_attr fake_state")
+ else:
+ self.fail("webob.exc.HTTPConflict was not raised")
+
+ def test_check_img_metadata_properties_quota_valid_metadata(self):
+ ctxt = utils.get_test_admin_context()
+ metadata1 = {"key": "value"}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
+ self.assertIsNone(actual)
+
+ metadata2 = {"key": "v" * 260}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata2)
+ self.assertIsNone(actual)
+
+ metadata3 = {"key": ""}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata3)
+ self.assertIsNone(actual)
+
+ def test_check_img_metadata_properties_quota_inv_metadata(self):
+ ctxt = utils.get_test_admin_context()
+ metadata1 = {"a" * 260: "value"}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata1)
+
+ metadata2 = {"": "value"}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata2)
+
+ metadata3 = "invalid metadata"
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata3)
+
+ metadata4 = None
+ self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
+ metadata4))
+ metadata5 = {}
+ self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
+ metadata5))
+
+ def test_status_from_state(self):
+ for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
+ for task_state in (task_states.RESIZE_PREP,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH):
+ actual = common.status_from_state(vm_state, task_state)
+ expected = 'RESIZE'
+ self.assertEqual(expected, actual)
+
+ def test_status_rebuild_from_state(self):
+ for vm_state in (vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.ERROR):
+ for task_state in (task_states.REBUILDING,
+ task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ task_states.REBUILD_SPAWNING):
+ actual = common.status_from_state(vm_state, task_state)
+ expected = 'REBUILD'
+ self.assertEqual(expected, actual)
+
+ def test_task_and_vm_state_from_status(self):
+ fixture1 = ['reboot']
+ actual = common.task_and_vm_state_from_status(fixture1)
+ expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING]
+ self.assertEqual(expected, actual)
+
+ fixture2 = ['resize']
+ actual = common.task_and_vm_state_from_status(fixture2)
+ expected = ([vm_states.ACTIVE, vm_states.STOPPED],
+ [task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP])
+ self.assertEqual(expected, actual)
+
+ fixture3 = ['resize', 'reboot']
+ actual = common.task_and_vm_state_from_status(fixture3)
+ expected = ([vm_states.ACTIVE, vm_states.STOPPED],
+ [task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING,
+ task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP])
+ self.assertEqual(expected, actual)
+
+
+class TestCollectionLinks(test.NoDBTestCase):
+ """Tests the _get_collection_links method."""
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_less_than_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict(limit=10))
+ type(req).params = params
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items, "ignored", "uuid")
+
+ self.assertFalse(href_link_mock.called)
+ self.assertThat(results, matchers.HasLength(0))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_given_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict(limit=1))
+ type(req).params = params
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_default_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict())
+ type(req).params = params
+ self.flags(osapi_max_limit=1)
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_default_limit_with_given(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+        # The given limit exceeds the default max, so only the default max
+        # number of items is returned.
+ params = mock.PropertyMock(return_value=dict(limit=2))
+ type(req).params = params
+ self.flags(osapi_max_limit=1)
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+
+class MetadataXMLDeserializationTest(test.TestCase):
+
+ deserializer = common.MetadataXMLDeserializer()
+
+ def test_create(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEqual(output, expected)
+
+ def test_create_empty(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {}}}
+ self.assertEqual(output, expected)
+
+ def test_update_all(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'update_all')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEqual(output, expected)
+
+ def test_update(self):
+ request_body = """
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key='123'>asdf</meta>"""
+ output = self.deserializer.deserialize(request_body, 'update')
+ expected = {"body": {"meta": {"123": "asdf"}}}
+ self.assertEqual(output, expected)
+
+
+class MetadataXMLSerializationTest(test.TestCase):
+
+ def test_xml_declaration(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_index(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_index_null(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ None: None,
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_index_unicode(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ u'three': u'Jos\xe9',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(metadata_elem.text.strip(), meta_value)
+
+ def test_show(self):
+ serializer = common.MetaItemTemplate()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ meta_dict = fixture['meta']
+ (meta_key, meta_value) = meta_dict.items()[0]
+ self.assertEqual(str(root.get('key')), str(meta_key))
+ self.assertEqual(root.text.strip(), meta_value)
+
+ def test_update_all(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'key6': 'value6',
+ 'key4': 'value4',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_update_item(self):
+ serializer = common.MetaItemTemplate()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ meta_dict = fixture['meta']
+ (meta_key, meta_value) = meta_dict.items()[0]
+ self.assertEqual(str(root.get('key')), str(meta_key))
+ self.assertEqual(root.text.strip(), meta_value)
+
+ def test_create(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 3)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">value2</meta>
+ <meta key="key9">value9</meta>
+ <meta key="key1">value1</meta>
+ </metadata>
+ """.replace(" ", "").replace("\n", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_metadata_deserializer(self):
+ """Should throw a 400 error on corrupt xml."""
+ deserializer = common.MetadataXMLDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class LinkPrefixTest(test.NoDBTestCase):
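+    # _update_link_prefix replaces the scheme, host and port of the first URL
+    # with those of the given prefix (including any path on the prefix) while
+    # keeping the original trailing path, as the assertions below illustrate.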
+
+ def test_update_link_prefix(self):
+ vb = common.ViewBuilder()
+ result = vb._update_link_prefix("http://192.168.0.243:24/",
+ "http://127.0.0.1/compute")
+ self.assertEqual("http://127.0.0.1/compute", result)
+
+ result = vb._update_link_prefix("http://foo.x.com/v1",
+ "http://new.prefix.com")
+ self.assertEqual("http://new.prefix.com/v1", result)
+
+ result = vb._update_link_prefix(
+ "http://foo.x.com/v1",
+ "http://new.prefix.com:20455/new_extra_prefix")
+ self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
+ result)
diff --git a/nova/tests/unit/api/openstack/test_faults.py b/nova/tests/unit/api/openstack/test_faults.py
new file mode 100644
index 0000000000..b52a7e5896
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_faults.py
@@ -0,0 +1,315 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.dom import minidom
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+import webob.dec
+import webob.exc
+
+import nova.api.openstack
+from nova.api.openstack import common
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import i18n
+from nova.i18n import _
+from nova import test
+
+
+class TestFaultWrapper(test.NoDBTestCase):
+ """Tests covering `nova.api.openstack:FaultWrapper` class."""
+
+ @mock.patch('oslo.i18n.translate')
+ @mock.patch('nova.i18n.get_available_languages')
+ def test_safe_exception_translated(self, mock_languages, mock_translate):
+ def fake_translate(value, locale):
+ return "I've been translated!"
+
+ mock_translate.side_effect = fake_translate
+
+ # Create an exception, passing a translatable message with a
+ # known value we can test for later.
+ safe_exception = exception.NotFound(_('Should be translated.'))
+ safe_exception.safe = True
+ safe_exception.code = 404
+
+ req = webob.Request.blank('/')
+
+ def raiser(*args, **kwargs):
+ raise safe_exception
+
+ wrapper = nova.api.openstack.FaultWrapper(raiser)
+ response = req.get_response(wrapper)
+
+ # The text of the exception's message attribute (replaced
+ # above with a non-default value) should be passed to
+ # translate().
+ mock_translate.assert_any_call(u'Should be translated.', None)
+ # The return value from translate() should appear in the response.
+ self.assertIn("I've been translated!", unicode(response.body))
+
+
+class TestFaults(test.NoDBTestCase):
+ """Tests covering `nova.api.openstack.faults:Fault` class."""
+
+ def _prepare_xml(self, xml_string):
+ """Remove characters from string which hinder XML equality testing."""
+ xml_string = xml_string.replace(" ", "")
+ xml_string = xml_string.replace("\n", "")
+ xml_string = xml_string.replace("\t", "")
+ return xml_string
+
+ def test_400_fault_json(self):
+ # Test fault serialized to JSON via file-extension and/or header.
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ expected = {
+ "badRequest": {
+ "message": "scram",
+ "code": 400,
+ },
+ }
+ actual = jsonutils.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
+
+ def test_413_fault_json(self):
+ # Test fault serialized to JSON via file-extension and/or header.
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPRequestEntityTooLarge
+ # NOTE(aloga): we intentionally pass an integer for the
+            # 'Retry-After' header. It should then be converted to a str.
+ fault = wsgi.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 413,
+ "retryAfter": "4",
+ },
+ }
+ actual = jsonutils.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
+
+ def test_429_fault_json(self):
+ # Test fault serialized to JSON via file-extension and/or header.
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPTooManyRequests
+ # NOTE(aloga): we intentionally pass an integer for the
+            # 'Retry-After' header. It should then be converted to a str.
+ fault = wsgi.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 429,
+ "retryAfter": "4",
+ },
+ }
+ actual = jsonutils.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
+
+ def test_raise(self):
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
+ @webob.dec.wsgify
+ def raiser(req):
+ raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
+
+ req = webob.Request.blank('/.xml')
+ resp = req.get_response(raiser)
+ self.assertEqual(resp.content_type, "application/xml")
+ self.assertEqual(resp.status_int, 404)
+ self.assertIn('whut?', resp.body)
+
+ def test_raise_403(self):
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
+ @webob.dec.wsgify
+ def raiser(req):
+ raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
+
+ req = webob.Request.blank('/.xml')
+ resp = req.get_response(raiser)
+ self.assertEqual(resp.content_type, "application/xml")
+ self.assertEqual(resp.status_int, 403)
+ self.assertNotIn('resizeNotAllowed', resp.body)
+ self.assertIn('forbidden', resp.body)
+
+ def test_raise_localize_explanation(self):
+ msgid = "String with params: %s"
+ params = ('blah', )
+ lazy_gettext = i18n._
+ expl = lazy_gettext(msgid) % params
+
+ @webob.dec.wsgify
+ def raiser(req):
+ raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))
+
+ req = webob.Request.blank('/.xml')
+ resp = req.get_response(raiser)
+ self.assertEqual(resp.content_type, "application/xml")
+ self.assertEqual(resp.status_int, 404)
+ self.assertIn((msgid % params), resp.body)
+
+ def test_fault_has_status_int(self):
+ # Ensure the status_int is set correctly on faults.
+ fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
+ self.assertEqual(fault.status_int, 400)
+
+ def test_xml_serializer(self):
+ # Ensure that a v1.1 request responds with a v1.1 xmlns.
+ request = webob.Request.blank('/v1.1',
+ headers={"Accept": "application/xml"})
+
+ fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ self.assertIn(common.XML_NS_V11, response.body)
+ self.assertEqual(response.content_type, "application/xml")
+ self.assertEqual(response.status_int, 400)
+
+
+class FaultsXMLSerializationTestV11(test.NoDBTestCase):
+ """Tests covering `nova.api.openstack.faults:Fault` class."""
+
+ def _prepare_xml(self, xml_string):
+ xml_string = xml_string.replace(" ", "")
+ xml_string = xml_string.replace("\n", "")
+ xml_string = xml_string.replace("\t", "")
+ return xml_string
+
+ def test_400_fault(self):
+ metadata = {'attributes': {"badRequest": 'code'}}
+ serializer = wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=common.XML_NS_V11)
+
+ fixture = {
+ "badRequest": {
+ "message": "scram",
+ "code": 400,
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ actual = minidom.parseString(self._prepare_xml(output))
+
+ expected = minidom.parseString(self._prepare_xml("""
+ <badRequest code="400" xmlns="%s">
+ <message>scram</message>
+ </badRequest>
+ """) % common.XML_NS_V11)
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_413_fault(self):
+ metadata = {'attributes': {"overLimit": 'code'}}
+ serializer = wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=common.XML_NS_V11)
+
+ fixture = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 413,
+ "retryAfter": 4,
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ actual = minidom.parseString(self._prepare_xml(output))
+
+ expected = minidom.parseString(self._prepare_xml("""
+ <overLimit code="413" xmlns="%s">
+ <message>sorry</message>
+ <retryAfter>4</retryAfter>
+ </overLimit>
+ """) % common.XML_NS_V11)
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_429_fault(self):
+ metadata = {'attributes': {"overLimit": 'code'}}
+ serializer = wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=common.XML_NS_V11)
+
+ fixture = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 429,
+ "retryAfter": 4,
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ actual = minidom.parseString(self._prepare_xml(output))
+
+ expected = minidom.parseString(self._prepare_xml("""
+ <overLimit code="429" xmlns="%s">
+ <message>sorry</message>
+ <retryAfter>4</retryAfter>
+ </overLimit>
+ """) % common.XML_NS_V11)
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_404_fault(self):
+ metadata = {'attributes': {"itemNotFound": 'code'}}
+ serializer = wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=common.XML_NS_V11)
+
+ fixture = {
+ "itemNotFound": {
+ "message": "sorry",
+ "code": 404,
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ actual = minidom.parseString(self._prepare_xml(output))
+
+ expected = minidom.parseString(self._prepare_xml("""
+ <itemNotFound code="404" xmlns="%s">
+ <message>sorry</message>
+ </itemNotFound>
+ """) % common.XML_NS_V11)
+
+ self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/nova/tests/unit/api/openstack/test_mapper.py b/nova/tests/unit/api/openstack/test_mapper.py
new file mode 100644
index 0000000000..b872be546f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_mapper.py
@@ -0,0 +1,46 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api import openstack as openstack_api
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class MapperTest(test.NoDBTestCase):
+ def test_resource_project_prefix(self):
+ class Controller(object):
+ def index(self, req):
+ return 'foo'
+
+ app = fakes.TestRouter(Controller(),
+ openstack_api.ProjectMapper())
+ req = webob.Request.blank('/1234/tests')
+ resp = req.get_response(app)
+ self.assertEqual(resp.body, 'foo')
+ self.assertEqual(resp.status_int, 200)
+
+ def test_resource_no_project_prefix(self):
+ class Controller(object):
+ def index(self, req):
+ return 'foo'
+
+ app = fakes.TestRouter(Controller(),
+ openstack_api.PlainMapper())
+ req = webob.Request.blank('/tests')
+ resp = req.get_response(app)
+ self.assertEqual(resp.body, 'foo')
+ self.assertEqual(resp.status_int, 200)
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
new file mode 100644
index 0000000000..7607101628
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -0,0 +1,1244 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import inspect
+
+import webob
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import i18n
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+
+class RequestTest(test.NoDBTestCase):
+ def test_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123', method='POST')
+ request.body = "<body />"
+ self.assertIsNone(request.get_content_type())
+
+ def test_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123', method='POST')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(exception.InvalidContentType,
+ request.get_content_type)
+
+ def test_content_type_with_charset(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "application/json; charset=UTF-8"
+ result = request.get_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_from_accept(self):
+ for content_type in ('application/xml',
+ 'application/vnd.openstack.compute+xml',
+ 'application/json',
+ 'application/vnd.openstack.compute+json'):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = content_type
+ result = request.best_match_content_type()
+ self.assertEqual(result, content_type)
+
+ def test_content_type_from_accept_best(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = ("application/json; q=0.3, "
+ "application/xml; q=0.9")
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_cache_and_retrieve_instances(self):
+ request = wsgi.Request.blank('/foo')
+ instances = []
+ for x in xrange(3):
+ instances.append({'uuid': 'uuid%s' % x})
+ # Store 2
+ request.cache_db_instances(instances[:2])
+ # Store 1
+ request.cache_db_instance(instances[2])
+ self.assertEqual(request.get_db_instance('uuid0'),
+ instances[0])
+ self.assertEqual(request.get_db_instance('uuid1'),
+ instances[1])
+ self.assertEqual(request.get_db_instance('uuid2'),
+ instances[2])
+ self.assertIsNone(request.get_db_instance('uuid3'))
+ self.assertEqual(request.get_db_instances(),
+ {'uuid0': instances[0],
+ 'uuid1': instances[1],
+ 'uuid2': instances[2]})
+
+ def test_cache_and_retrieve_compute_nodes(self):
+ request = wsgi.Request.blank('/foo')
+ compute_nodes = []
+ for x in xrange(3):
+ compute_nodes.append({'id': 'id%s' % x})
+ # Store 2
+ request.cache_db_compute_nodes(compute_nodes[:2])
+ # Store 1
+ request.cache_db_compute_node(compute_nodes[2])
+ self.assertEqual(request.get_db_compute_node('id0'),
+ compute_nodes[0])
+ self.assertEqual(request.get_db_compute_node('id1'),
+ compute_nodes[1])
+ self.assertEqual(request.get_db_compute_node('id2'),
+ compute_nodes[2])
+ self.assertIsNone(request.get_db_compute_node('id3'))
+ self.assertEqual(request.get_db_compute_nodes(),
+ {'id0': compute_nodes[0],
+ 'id1': compute_nodes[1],
+ 'id2': compute_nodes[2]})
+
+ def test_from_request(self):
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'
+ request.headers = {'Accept-Language': accepted}
+ self.assertEqual(request.best_match_language(), 'en_US')
+
+ def test_asterisk(self):
+        # An asterisk should match the first available language when there
+        # are no other matches.
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = '*,es;q=.5'
+ request.headers = {'Accept-Language': accepted}
+ self.assertEqual(request.best_match_language(), 'en_GB')
+
+ def test_prefix(self):
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = 'zh'
+ request.headers = {'Accept-Language': accepted}
+ self.assertEqual(request.best_match_language(), 'zh_CN')
+
+ def test_secondary(self):
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = 'nn,en-gb;q=.5'
+ request.headers = {'Accept-Language': accepted}
+ self.assertEqual(request.best_match_language(), 'en_GB')
+
+ def test_none_found(self):
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = 'nb-no'
+ request.headers = {'Accept-Language': accepted}
+ self.assertIs(request.best_match_language(), None)
+
+ def test_no_lang_header(self):
+ self.stubs.Set(i18n, 'get_available_languages',
+ fakes.fake_get_available_languages)
+
+ request = wsgi.Request.blank('/')
+ accepted = ''
+ request.headers = {'Accept-Language': accepted}
+ self.assertIs(request.best_match_language(), None)
+
+
+class ActionDispatcherTest(test.NoDBTestCase):
+ def test_dispatch(self):
+ serializer = wsgi.ActionDispatcher()
+ serializer.create = lambda x: 'pants'
+ self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
+
+ def test_dispatch_action_None(self):
+ serializer = wsgi.ActionDispatcher()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
+
+ def test_dispatch_default(self):
+ serializer = wsgi.ActionDispatcher()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
+
+
+class DictSerializerTest(test.NoDBTestCase):
+ def test_dispatch_default(self):
+ serializer = wsgi.DictSerializer()
+ self.assertEqual(serializer.serialize({}, 'update'), '')
+
+
+class XMLDictSerializerTest(test.NoDBTestCase):
+ def test_xml(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+ serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+ def test_xml_contains_unicode(self):
+ input_dict = dict(test=u'\u89e3\u7801')
+ expected_xml = '<test>\xe8\xa7\xa3\xe7\xa0\x81</test>'
+ serializer = wsgi.XMLDictSerializer()
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(expected_xml, result)
+
+
+class JSONDictSerializerTest(test.NoDBTestCase):
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_json = '{"servers":{"a":[2,3]}}'
+ serializer = wsgi.JSONDictSerializer()
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.NoDBTestCase):
+ def test_dispatch_default(self):
+ deserializer = wsgi.TextDeserializer()
+ self.assertEqual(deserializer.deserialize({}, 'update'), {})
+
+
+class JSONDeserializerTest(test.NoDBTestCase):
+ def test_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = {
+ 'body': {
+ 'a': {
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+ 'd': {'e': '1'},
+ 'f': '1',
+ },
+ },
+ }
+ deserializer = wsgi.JSONDeserializer()
+ self.assertEqual(deserializer.deserialize(data), as_dict)
+
+ def test_json_valid_utf8(self):
+ data = """{"server": {"min_count": 1, "flavorRef": "1",
+ "name": "\xe6\xa6\x82\xe5\xbf\xb5",
+ "imageRef": "10bab10c-1304-47d",
+ "max_count": 1}} """
+ as_dict = {
+ 'body': {
+ u'server': {
+ u'min_count': 1, u'flavorRef': u'1',
+ u'name': u'\u6982\u5ff5',
+ u'imageRef': u'10bab10c-1304-47d',
+ u'max_count': 1
+ }
+ }
+ }
+ deserializer = wsgi.JSONDeserializer()
+ self.assertEqual(deserializer.deserialize(data), as_dict)
+
+ def test_json_invalid_utf8(self):
+ """Send invalid utf-8 to JSONDeserializer."""
+ data = """{"server": {"min_count": 1, "flavorRef": "1",
+ "name": "\xf0\x28\x8c\x28",
+ "imageRef": "10bab10c-1304-47d",
+ "max_count": 1}} """
+
+ deserializer = wsgi.JSONDeserializer()
+ self.assertRaises(exception.MalformedRequestBody,
+ deserializer.deserialize, data)
+
+
+class XMLDeserializerTest(test.NoDBTestCase):
+ def test_xml(self):
+ xml = """
+ <a a1="1" a2="2">
+ <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+ <d><e>1</e></d>
+ <f>1</f>
+ </a>
+ """.strip()
+ as_dict = {
+ 'body': {
+ 'a': {
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+ 'd': {'e': '1'},
+ 'f': '1',
+ },
+ },
+ }
+ metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+ deserializer = wsgi.XMLDeserializer(metadata=metadata)
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+ def test_xml_empty(self):
+ xml = '<a></a>'
+ as_dict = {"body": {"a": {}}}
+ deserializer = wsgi.XMLDeserializer()
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+ def test_xml_valid_utf8(self):
+ xml = """ <a><name>\xe6\xa6\x82\xe5\xbf\xb5</name></a> """
+ deserializer = wsgi.XMLDeserializer()
+ as_dict = {'body': {u'a': {u'name': u'\u6982\u5ff5'}}}
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+ def test_xml_invalid_utf8(self):
+ """Send invalid utf-8 to XMLDeserializer."""
+ xml = """ <a><name>\xf0\x28\x8c\x28</name></a> """
+ deserializer = wsgi.XMLDeserializer()
+ self.assertRaises(exception.MalformedRequestBody,
+ deserializer.deserialize, xml)
+
+
+class ResourceTest(test.NoDBTestCase):
+
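+    # The request-id header name differs between API versions: the v2 API
+    # uses x-compute-request-id, while v3 uses x-openstack-request-id.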
+ def get_req_id_header_name(self, request):
+ header_name = 'x-openstack-request-id'
+ if utils.get_api_version(request) < 3:
+ header_name = 'x-compute-request-id'
+
+ return header_name
+
+ def test_resource_call_with_method_get(self):
+ class Controller(object):
+ def index(self, req):
+ return 'success'
+
+ app = fakes.TestRouter(Controller())
+ # the default method is GET
+ req = webob.Request.blank('/tests')
+ response = req.get_response(app)
+ self.assertEqual(response.body, 'success')
+ self.assertEqual(response.status_int, 200)
+ req.body = '{"body": {"key": "value"}}'
+ response = req.get_response(app)
+ self.assertEqual(response.body, 'success')
+ self.assertEqual(response.status_int, 200)
+ req.content_type = 'application/json'
+ response = req.get_response(app)
+ self.assertEqual(response.body, 'success')
+ self.assertEqual(response.status_int, 200)
+
+ def test_resource_call_with_method_post(self):
+ class Controller(object):
+ @extensions.expected_errors(400)
+ def create(self, req, body):
+ if expected_body != body:
+ msg = "The request body invalid"
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+ return "success"
+ # verify the method: POST
+ app = fakes.TestRouter(Controller())
+ req = webob.Request.blank('/tests', method="POST",
+ content_type='application/json')
+ req.body = '{"body": {"key": "value"}}'
+ expected_body = {'body': {
+ "key": "value"
+ }
+ }
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, 'success')
+ # verify without body
+ expected_body = None
+ req.body = None
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, 'success')
+ # the body is validated in the controller
+ expected_body = {'body': None}
+ response = req.get_response(app)
+        expected_bad_request_body = ('{"badRequest": '
+            '{"message": "The request body is invalid", "code": 400}}')
+        self.assertEqual(response.status_int, 400)
+        self.assertEqual(expected_bad_request_body, response.body)
+
+ def test_resource_call_with_method_put(self):
+ class Controller(object):
+ def update(self, req, id, body):
+ if expected_body != body:
+ msg = "The request body invalid"
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+ return "success"
+ # verify the method: PUT
+ app = fakes.TestRouter(Controller())
+ req = webob.Request.blank('/tests/test_id', method="PUT",
+ content_type='application/json')
+ req.body = '{"body": {"key": "value"}}'
+ expected_body = {'body': {
+ "key": "value"
+ }
+ }
+ response = req.get_response(app)
+ self.assertEqual(response.body, 'success')
+ self.assertEqual(response.status_int, 200)
+ req.body = None
+ expected_body = None
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 200)
+ # verify no content_type is contained in the request
+ req.content_type = None
+ req.body = '{"body": {"key": "value"}}'
+ response = req.get_response(app)
+ expected_unsupported_type_body = ('{"badRequest": '
+ '{"message": "Unsupported Content-Type", "code": 400}}')
+ self.assertEqual(response.status_int, 400)
+ self.assertEqual(expected_unsupported_type_body, response.body)
+
+ def test_resource_call_with_method_delete(self):
+ class Controller(object):
+ def delete(self, req, id):
+ return "success"
+
+ # verify the method: DELETE
+ app = fakes.TestRouter(Controller())
+ req = webob.Request.blank('/tests/test_id', method="DELETE")
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, 'success')
+ # ignore the body
+ req.body = '{"body": {"key": "value"}}'
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, 'success')
+
+ def test_resource_not_authorized(self):
+ class Controller(object):
+ def index(self, req):
+ raise exception.Forbidden()
+
+ req = webob.Request.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+ self.assertEqual(response.status_int, 403)
+
+ def test_dispatch(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ method, extensions = resource.get_method(None, 'index', None, '')
+ actual = resource.dispatch(method, None, {'pants': 'off'})
+ expected = 'off'
+ self.assertEqual(actual, expected)
+
+ def test_get_method_unknown_controller_method(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(AttributeError, resource.get_method,
+ None, 'create', None, '')
+
+ def test_get_method_action_json(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ method, extensions = resource.get_method(None, 'action',
+ 'application/json',
+ '{"fooAction": true}')
+ self.assertEqual(controller._action_foo, method)
+
+ def test_get_method_action_xml(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ method, extensions = resource.get_method(None, 'action',
+ 'application/xml',
+ '<fooAction>true</fooAction>')
+ self.assertEqual(controller._action_foo, method)
+
+ def test_get_method_action_corrupt_xml(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ resource.get_method,
+ None, 'action',
+ 'application/xml',
+ utils.killer_xml_body())
+
+ def test_get_method_action_bad_body(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(exception.MalformedRequestBody, resource.get_method,
+ None, 'action', 'application/json', '{}')
+
+ def test_get_method_unknown_controller_action(self):
+ class Controller(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(KeyError, resource.get_method,
+ None, 'action', 'application/json',
+ '{"barAction": true}')
+
+ def test_get_method_action_method(self):
+        class Controller(object):
+ def action(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ method, extensions = resource.get_method(None, 'action',
+ 'application/xml',
+                                                  '<fooAction>true</fooAction>')
+ self.assertEqual(controller.action, method)
+
+ def test_get_action_args(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ env = {
+ 'wsgiorg.routing_args': [None, {
+ 'controller': None,
+ 'format': None,
+ 'action': 'update',
+ 'id': 12,
+ }],
+ }
+
+ expected = {'action': 'update', 'id': 12}
+
+ self.assertEqual(resource.get_action_args(env), expected)
+
+ def test_get_body_bad_content(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ request = wsgi.Request.blank('/', method='POST')
+ request.headers['Content-Type'] = 'application/none'
+ request.body = 'foo'
+
+ content_type, body = resource.get_body(request)
+ self.assertIsNone(content_type)
+ self.assertEqual(body, '')
+
+ def test_get_body_no_content_type(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ request = wsgi.Request.blank('/', method='POST')
+ request.body = 'foo'
+
+ content_type, body = resource.get_body(request)
+ self.assertIsNone(content_type)
+ self.assertEqual(body, 'foo')
+
+ def test_get_body_no_content_body(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ request = wsgi.Request.blank('/', method='POST')
+ request.headers['Content-Type'] = 'application/json'
+ request.body = ''
+
+ content_type, body = resource.get_body(request)
+ self.assertEqual('application/json', content_type)
+ self.assertEqual(body, '')
+
+ def test_get_body(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ request = wsgi.Request.blank('/', method='POST')
+ request.headers['Content-Type'] = 'application/json'
+ request.body = 'foo'
+
+ content_type, body = resource.get_body(request)
+ self.assertEqual(content_type, 'application/json')
+ self.assertEqual(body, 'foo')
+
+ def test_get_request_id_with_dict_response_body(self):
+ class Controller(wsgi.Controller):
+ def index(self, req):
+ return {'foo': 'bar'}
+
+ req = fakes.HTTPRequest.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+ self.assertIn('nova.context', req.environ)
+ self.assertEqual(response.body, '{"foo": "bar"}')
+ self.assertEqual(response.status_int, 200)
+
+ def test_no_request_id_with_str_response_body(self):
+ class Controller(wsgi.Controller):
+ def index(self, req):
+ return 'foo'
+
+ req = fakes.HTTPRequest.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+        # NOTE(alaski): This test is really to ensure that a str response
+        # doesn't error. Not having a request_id header is a side effect of
+        # our wsgi setup; ideally it would be there.
+        expected_header = self.get_req_id_header_name(req)
+        self.assertNotIn(expected_header, response.headers)
+ self.assertEqual(response.body, 'foo')
+ self.assertEqual(response.status_int, 200)
+
+ def test_get_request_id_no_response_body(self):
+ class Controller(object):
+ def index(self, req):
+ pass
+
+ req = fakes.HTTPRequest.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+ self.assertIn('nova.context', req.environ)
+ self.assertEqual(response.body, '')
+ self.assertEqual(response.status_int, 200)
+
+ def test_deserialize_badtype(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertRaises(exception.InvalidContentType,
+ resource.deserialize,
+ controller.index, 'application/none', 'foo')
+
+ def test_deserialize_default(self):
+ class JSONDeserializer(object):
+ def deserialize(self, body):
+ return 'json'
+
+ class XMLDeserializer(object):
+ def deserialize(self, body):
+ return 'xml'
+
+ class Controller(object):
+ @wsgi.deserializers(xml=XMLDeserializer)
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller, json=JSONDeserializer)
+
+ obj = resource.deserialize(controller.index, 'application/json', 'foo')
+ self.assertEqual(obj, 'json')
+
+ def test_deserialize_decorator(self):
+ class JSONDeserializer(object):
+ def deserialize(self, body):
+ return 'json'
+
+ class XMLDeserializer(object):
+ def deserialize(self, body):
+ return 'xml'
+
+ class Controller(object):
+ @wsgi.deserializers(xml=XMLDeserializer)
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller, json=JSONDeserializer)
+
+ obj = resource.deserialize(controller.index, 'application/xml', 'foo')
+ self.assertEqual(obj, 'xml')
+
+ def test_register_actions(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ class ControllerExtended(wsgi.Controller):
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ @wsgi.action('barAction')
+ def _action_bar(self, req, id, body):
+ return body
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertEqual({}, resource.wsgi_actions)
+
+ extended = ControllerExtended()
+ resource.register_actions(extended)
+ self.assertEqual({
+ 'fooAction': extended._action_foo,
+ 'barAction': extended._action_bar,
+ }, resource.wsgi_actions)
+
+ def test_register_extensions(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ class ControllerExtended(wsgi.Controller):
+ @wsgi.extends
+ def index(self, req, resp_obj, pants=None):
+ return None
+
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, resp, id, body):
+ return None
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+ self.assertEqual({}, resource.wsgi_extensions)
+ self.assertEqual({}, resource.wsgi_action_extensions)
+
+ extended = ControllerExtended()
+ resource.register_extensions(extended)
+ self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
+ self.assertEqual({'fooAction': [extended._action_foo]},
+ resource.wsgi_action_extensions)
+
+ def test_get_method_extensions(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ class ControllerExtended(wsgi.Controller):
+ @wsgi.extends
+ def index(self, req, resp_obj, pants=None):
+ return None
+
+ controller = Controller()
+ extended = ControllerExtended()
+ resource = wsgi.Resource(controller)
+ resource.register_extensions(extended)
+ method, extensions = resource.get_method(None, 'index', None, '')
+ self.assertEqual(method, controller.index)
+ self.assertEqual(extensions, [extended.index])
+
+ def test_get_method_action_extensions(self):
+ class Controller(wsgi.Controller):
+ def index(self, req, pants=None):
+ return pants
+
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return body
+
+ class ControllerExtended(wsgi.Controller):
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, resp_obj, id, body):
+ return None
+
+ controller = Controller()
+ extended = ControllerExtended()
+ resource = wsgi.Resource(controller)
+ resource.register_extensions(extended)
+ method, extensions = resource.get_method(None, 'action',
+ 'application/json',
+ '{"fooAction": true}')
+ self.assertEqual(method, controller._action_foo)
+ self.assertEqual(extensions, [extended._action_foo])
+
+ def test_get_method_action_whitelist_extensions(self):
+ class Controller(wsgi.Controller):
+ def index(self, req, pants=None):
+ return pants
+
+ class ControllerExtended(wsgi.Controller):
+ @wsgi.action('create')
+ def _create(self, req, body):
+ pass
+
+ @wsgi.action('delete')
+ def _delete(self, req, id):
+ pass
+
+ controller = Controller()
+ extended = ControllerExtended()
+ resource = wsgi.Resource(controller)
+ resource.register_actions(extended)
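+        # Whitelisted action names (create, delete) registered on the
+        # extension are dispatched as if they were controller methods.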
+
+ method, extensions = resource.get_method(None, 'create',
+ 'application/json',
+ '{"create": true}')
+ self.assertEqual(method, extended._create)
+ self.assertEqual(extensions, [])
+
+ method, extensions = resource.get_method(None, 'delete', None, None)
+ self.assertEqual(method, extended._delete)
+ self.assertEqual(extensions, [])
+
+ def test_pre_process_extensions_regular(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req, resp_obj):
+ called.append(1)
+ return None
+
+ def extension2(req, resp_obj):
+ called.append(2)
+ return None
+
+ extensions = [extension1, extension2]
+ response, post = resource.pre_process_extensions(extensions, None, {})
+ self.assertEqual(called, [])
+ self.assertIsNone(response)
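+        # Plain (non-generator) extensions only run as post-processors and
+        # are returned in reverse order.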
+ self.assertEqual(list(post), [extension2, extension1])
+
+ def test_pre_process_extensions_generator(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req):
+ called.append('pre1')
+ yield
+ called.append('post1')
+
+ def extension2(req):
+ called.append('pre2')
+ yield
+ called.append('post2')
+
+ extensions = [extension1, extension2]
+ response, post = resource.pre_process_extensions(extensions, None, {})
+ post = list(post)
+ self.assertEqual(called, ['pre1', 'pre2'])
+ self.assertIsNone(response)
+ self.assertEqual(len(post), 2)
+ self.assertTrue(inspect.isgenerator(post[0]))
+ self.assertTrue(inspect.isgenerator(post[1]))
+
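+        # Resuming each generator runs the code after its yield; the reversed
+        # order gives the post2/post1 sequence asserted below.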
+ for gen in post:
+ try:
+ gen.send(None)
+ except StopIteration:
+ continue
+
+ self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
+
+ def test_pre_process_extensions_generator_response(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req):
+ called.append('pre1')
+ yield 'foo'
+
+ def extension2(req):
+ called.append('pre2')
+
+ extensions = [extension1, extension2]
+ response, post = resource.pre_process_extensions(extensions, None, {})
+ self.assertEqual(called, ['pre1'])
+ self.assertEqual(response, 'foo')
+ self.assertEqual(post, [])
+
+ def test_post_process_extensions_regular(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req, resp_obj):
+ called.append(1)
+ return None
+
+ def extension2(req, resp_obj):
+ called.append(2)
+ return None
+
+ response = resource.post_process_extensions([extension2, extension1],
+ None, None, {})
+ self.assertEqual(called, [2, 1])
+ self.assertIsNone(response)
+
+ def test_post_process_extensions_regular_response(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req, resp_obj):
+ called.append(1)
+ return None
+
+ def extension2(req, resp_obj):
+ called.append(2)
+ return 'foo'
+
+ response = resource.post_process_extensions([extension2, extension1],
+ None, None, {})
+ self.assertEqual(called, [2])
+ self.assertEqual(response, 'foo')
+
+ def test_post_process_extensions_generator(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req):
+ yield
+ called.append(1)
+
+ def extension2(req):
+ yield
+ called.append(2)
+
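+        # Advance each generator past its first yield so only the
+        # post-processing half remains to run.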
+ ext1 = extension1(None)
+ ext1.next()
+ ext2 = extension2(None)
+ ext2.next()
+
+ response = resource.post_process_extensions([ext2, ext1],
+ None, None, {})
+
+ self.assertEqual(called, [2, 1])
+ self.assertIsNone(response)
+
+ def test_post_process_extensions_generator_response(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ controller = Controller()
+ resource = wsgi.Resource(controller)
+
+ called = []
+
+ def extension1(req):
+ yield
+ called.append(1)
+
+ def extension2(req):
+ yield
+ called.append(2)
+ yield 'foo'
+
+ ext1 = extension1(None)
+ ext1.next()
+ ext2 = extension2(None)
+ ext2.next()
+
+ response = resource.post_process_extensions([ext2, ext1],
+ None, None, {})
+
+ self.assertEqual(called, [2])
+ self.assertEqual(response, 'foo')
+
+ def test_resource_exception_handler_type_error(self):
+ # A TypeError should be translated to a Fault/HTTP 400.
+        def foo(a):
+ return a
+
+ try:
+ with wsgi.ResourceExceptionHandler():
+ foo() # generate a TypeError
+ self.fail("Should have raised a Fault (HTTP 400)")
+ except wsgi.Fault as fault:
+ self.assertEqual(400, fault.status_int)
+
+ def test_resource_headers_are_utf8(self):
+ resp = webob.Response(status_int=202)
+ resp.headers['x-header1'] = 1
+ resp.headers['x-header2'] = u'header2'
+ resp.headers['x-header3'] = u'header3'
+
+ class Controller(object):
+ def index(self, req):
+ return resp
+
+ req = webob.Request.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+
+ for hdr, val in response.headers.iteritems():
+ # All headers must be utf8
+ self.assertIsInstance(hdr, str)
+ self.assertIsInstance(val, str)
+ self.assertEqual(response.headers['x-header1'], '1')
+ self.assertEqual(response.headers['x-header2'], 'header2')
+ self.assertEqual(response.headers['x-header3'], 'header3')
+
+ def test_resource_valid_utf8_body(self):
+ class Controller(object):
+ def update(self, req, id, body):
+ return body
+
+ req = webob.Request.blank('/tests/test_id', method="PUT")
+ body = """ {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
+ expected_body = '{"name": "\\u6982\\u5ff5"}'
+ req.body = body
+ req.headers['Content-Type'] = 'application/json'
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+ self.assertEqual(response.body, expected_body)
+ self.assertEqual(response.status_int, 200)
+
+ def test_resource_invalid_utf8(self):
+ class Controller(object):
+ def update(self, req, id, body):
+ return body
+
+ req = webob.Request.blank('/tests/test_id', method="PUT")
+ body = """ {"name": "\xf0\x28\x8c\x28" } """
+ req.body = body
+ req.headers['Content-Type'] = 'application/json'
+ app = fakes.TestRouter(Controller())
+ self.assertRaises(UnicodeDecodeError, req.get_response, app)
+
+
+class ResponseObjectTest(test.NoDBTestCase):
+ def test_default_code(self):
+ robj = wsgi.ResponseObject({})
+ self.assertEqual(robj.code, 200)
+
+ def test_modified_code(self):
+ robj = wsgi.ResponseObject({})
+ robj._default_code = 202
+ self.assertEqual(robj.code, 202)
+
+ def test_override_default_code(self):
+ robj = wsgi.ResponseObject({}, code=404)
+ self.assertEqual(robj.code, 404)
+
+ def test_override_modified_code(self):
+ robj = wsgi.ResponseObject({}, code=404)
+ robj._default_code = 202
+ self.assertEqual(robj.code, 404)
+
+ def test_set_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ self.assertEqual(robj.headers, {'header': 'foo'})
+
+ def test_get_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ self.assertEqual(robj['hEADER'], 'foo')
+
+ def test_del_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ del robj['hEADER']
+ self.assertNotIn('header', robj.headers)
+
+ def test_header_isolation(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
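+        # headers should be a copy; mutating it must not affect the object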
+ hdrs = robj.headers
+ hdrs['hEADER'] = 'bar'
+ self.assertEqual(robj['hEADER'], 'foo')
+
+ def test_default_serializers(self):
+ robj = wsgi.ResponseObject({})
+ self.assertEqual(robj.serializers, {})
+
+ def test_bind_serializers(self):
+ robj = wsgi.ResponseObject({}, json='foo')
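+        # Serializers passed to the constructor win over method-bound ones;
+        # only missing content types are filled in.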
+ robj._bind_method_serializers(dict(xml='bar', json='baz'))
+ self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
+
+ def test_get_serializer(self):
+ robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ _mtype, serializer = robj.get_serializer(content_type)
+ self.assertEqual(serializer, mtype)
+
+ def test_get_serializer_defaults(self):
+ robj = wsgi.ResponseObject({})
+ default_serializers = dict(json='json', xml='xml', atom='atom')
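+        # With no bound serializers, a lookup fails unless an explicit
+        # default serializer mapping is supplied.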
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ self.assertRaises(exception.InvalidContentType,
+ robj.get_serializer, content_type)
+ _mtype, serializer = robj.get_serializer(content_type,
+ default_serializers)
+ self.assertEqual(serializer, mtype)
+
+ def test_serialize(self):
+ class JSONSerializer(object):
+ def serialize(self, obj):
+ return 'json'
+
+ class XMLSerializer(object):
+ def serialize(self, obj):
+ return 'xml'
+
+ class AtomSerializer(object):
+ def serialize(self, obj):
+ return 'atom'
+
+ robj = wsgi.ResponseObject({}, code=202,
+ json=JSONSerializer,
+ xml=XMLSerializer,
+ atom=AtomSerializer)
+ robj['X-header1'] = 'header1'
+ robj['X-header2'] = 'header2'
+ robj['X-header3'] = 3
+ robj['X-header-unicode'] = u'header-unicode'
+
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ request = wsgi.Request.blank('/tests/123')
+ response = robj.serialize(request, content_type)
+
+ self.assertEqual(response.headers['Content-Type'], content_type)
+ for hdr, val in response.headers.iteritems():
+ # All headers must be utf8
+ self.assertIsInstance(hdr, str)
+ self.assertIsInstance(val, str)
+ self.assertEqual(response.headers['X-header1'], 'header1')
+ self.assertEqual(response.headers['X-header2'], 'header2')
+ self.assertEqual(response.headers['X-header3'], '3')
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(response.body, mtype)
+
+
+class ValidBodyTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ValidBodyTest, self).setUp()
+ self.controller = wsgi.Controller()
+
+ def test_is_valid_body(self):
+ body = {'foo': {}}
+ self.assertTrue(self.controller.is_valid_body(body, 'foo'))
+
+ def test_is_valid_body_none(self):
+ wsgi.Resource(controller=None)
+ self.assertFalse(self.controller.is_valid_body(None, 'foo'))
+
+ def test_is_valid_body_empty(self):
+ wsgi.Resource(controller=None)
+ self.assertFalse(self.controller.is_valid_body({}, 'foo'))
+
+ def test_is_valid_body_no_entity(self):
+ wsgi.Resource(controller=None)
+ body = {'bar': {}}
+ self.assertFalse(self.controller.is_valid_body(body, 'foo'))
+
+ def test_is_valid_body_malformed_entity(self):
+ wsgi.Resource(controller=None)
+ body = {'foo': 'bar'}
+ self.assertFalse(self.controller.is_valid_body(body, 'foo'))
diff --git a/nova/tests/unit/api/openstack/test_xmlutil.py b/nova/tests/unit/api/openstack/test_xmlutil.py
new file mode 100644
index 0000000000..19186889bb
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_xmlutil.py
@@ -0,0 +1,948 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.dom import minidom
+
+from lxml import etree
+
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova import test
+from nova.tests.unit import utils as tests_utils
+
+
+class SelectorTest(test.NoDBTestCase):
+ obj_for_test = {
+ 'test': {
+ 'name': 'test',
+ 'values': [1, 2, 3],
+ 'attrs': {
+ 'foo': 1,
+ 'bar': 2,
+ 'baz': 3,
+ },
+ },
+ }
+
+ def test_repr(self):
+ sel = xmlutil.Selector()
+ self.assertEqual(repr(sel), "Selector()")
+
+ def test_empty_selector(self):
+ sel = xmlutil.EmptyStringSelector()
+ self.assertEqual(len(sel.chain), 0)
+ self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
+ self.assertEqual(
+ repr(self.obj_for_test),
+ "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
+ "{'baz': 3, 'foo': 1, 'bar': 2}}}")
+
+ def test_dict_selector(self):
+ sel = xmlutil.Selector('test')
+ self.assertEqual(len(sel.chain), 1)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel(self.obj_for_test),
+ self.obj_for_test['test'])
+
+ def test_datum_selector(self):
+ sel = xmlutil.Selector('test', 'name')
+ self.assertEqual(len(sel.chain), 2)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel.chain[1], 'name')
+ self.assertEqual(sel(self.obj_for_test), 'test')
+
+ def test_list_selector(self):
+ sel = xmlutil.Selector('test', 'values', 0)
+ self.assertEqual(len(sel.chain), 3)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel.chain[1], 'values')
+ self.assertEqual(sel.chain[2], 0)
+ self.assertEqual(sel(self.obj_for_test), 1)
+
+ def test_items_selector(self):
+ sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
+ self.assertEqual(len(sel.chain), 3)
+ self.assertEqual(sel.chain[2], xmlutil.get_items)
+ for key, val in sel(self.obj_for_test):
+ self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
+
+ def test_missing_key_selector(self):
+ sel = xmlutil.Selector('test2', 'attrs')
+ self.assertIsNone(sel(self.obj_for_test))
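+        # Passing True as the extra argument makes a missing key raise
+        # instead of returning None.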
+ self.assertRaises(KeyError, sel, self.obj_for_test, True)
+
+ def test_constant_selector(self):
+ sel = xmlutil.ConstantSelector('Foobar')
+ self.assertEqual(sel.value, 'Foobar')
+ self.assertEqual(sel(self.obj_for_test), 'Foobar')
+ self.assertEqual(repr(sel), "'Foobar'")
+
+
+class TemplateElementTest(test.NoDBTestCase):
+ def test_element_initial_attributes(self):
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
+ c=4, d=5, e=6)
+
+ # Verify all the attributes are as expected
+ expected = dict(a=1, b=2, c=4, d=5, e=6)
+ for k, v in expected.items():
+ self.assertEqual(elem.attrib[k].chain[0], v)
+ self.assertTrue(repr(elem))
+
+ def test_element_get_attributes(self):
+ expected = dict(a=1, b=2, c=3)
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=expected)
+
+ # Verify that get() retrieves the attributes
+ for k, v in expected.items():
+ self.assertEqual(elem.get(k).chain[0], v)
+
+ def test_element_set_attributes(self):
+ attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
+
+ # Create a bare template element with no attributes
+ elem = xmlutil.TemplateElement('test')
+
+ # Set the attribute values
+ for k, v in attrs.items():
+ elem.set(k, v)
+
+ # Now verify what got set
+ self.assertEqual(len(elem.attrib['a'].chain), 1)
+ self.assertEqual(elem.attrib['a'].chain[0], 'a')
+ self.assertEqual(len(elem.attrib['b'].chain), 1)
+ self.assertEqual(elem.attrib['b'].chain[0], 'foo')
+ self.assertEqual(elem.attrib['c'], attrs['c'])
+
+ def test_element_attribute_keys(self):
+ attrs = dict(a=1, b=2, c=3, d=4)
+ expected = set(attrs.keys())
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=attrs)
+
+ # Now verify keys
+ self.assertEqual(set(elem.keys()), expected)
+
+ def test_element_attribute_items(self):
+ expected = dict(a=xmlutil.Selector(1),
+ b=xmlutil.Selector(2),
+ c=xmlutil.Selector(3))
+ keys = set(expected.keys())
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=expected)
+
+ # Now verify items
+ for k, v in elem.items():
+ self.assertEqual(expected[k], v)
+ keys.remove(k)
+
+ # Did we visit all keys?
+ self.assertEqual(len(keys), 0)
+
+ def test_element_selector_none(self):
+ # Create a template element with no selector
+ elem = xmlutil.TemplateElement('test')
+
+ self.assertEqual(len(elem.selector.chain), 0)
+
+ def test_element_selector_string(self):
+ # Create a template element with a string selector
+ elem = xmlutil.TemplateElement('test', selector='test')
+
+ self.assertEqual(len(elem.selector.chain), 1)
+ self.assertEqual(elem.selector.chain[0], 'test')
+
+ def test_element_selector(self):
+ sel = xmlutil.Selector('a', 'b')
+
+ # Create a template element with an explicit selector
+ elem = xmlutil.TemplateElement('test', selector=sel)
+
+ self.assertEqual(elem.selector, sel)
+
+ def test_element_subselector_none(self):
+ # Create a template element with no subselector
+ elem = xmlutil.TemplateElement('test')
+
+ self.assertIsNone(elem.subselector)
+
+ def test_element_subselector_string(self):
+ # Create a template element with a string subselector
+ elem = xmlutil.TemplateElement('test', subselector='test')
+
+ self.assertEqual(len(elem.subselector.chain), 1)
+ self.assertEqual(elem.subselector.chain[0], 'test')
+
+ def test_element_subselector(self):
+ sel = xmlutil.Selector('a', 'b')
+
+ # Create a template element with an explicit subselector
+ elem = xmlutil.TemplateElement('test', subselector=sel)
+
+ self.assertEqual(elem.subselector, sel)
+
+ def test_element_append_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a child element
+ child = xmlutil.TemplateElement('child')
+
+ # Append the child to the parent
+ elem.append(child)
+
+ # Verify that the child was added
+ self.assertEqual(len(elem), 1)
+ self.assertEqual(elem[0], child)
+ self.assertIn('child', elem)
+ self.assertEqual(elem['child'], child)
+
+ # Ensure that multiple children of the same name are rejected
+ child2 = xmlutil.TemplateElement('child')
+ self.assertRaises(KeyError, elem.append, child2)
+
+ def test_element_extend_children(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Verify that the children were added
+ self.assertEqual(len(elem), 3)
+ for idx in range(len(elem)):
+ self.assertEqual(children[idx], elem[idx])
+ self.assertIn(children[idx].tag, elem)
+ self.assertEqual(elem[children[idx].tag], children[idx])
+
+ # Ensure that multiple children of the same name are rejected
+ children2 = [
+ xmlutil.TemplateElement('child4'),
+ xmlutil.TemplateElement('child1'),
+ ]
+ self.assertRaises(KeyError, elem.extend, children2)
+
+ # Also ensure that child4 was not added
+ self.assertEqual(len(elem), 3)
+ self.assertEqual(elem[-1].tag, 'child3')
+
+ def test_element_insert_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Create a child to insert
+ child = xmlutil.TemplateElement('child4')
+
+ # Insert it
+ elem.insert(1, child)
+
+ # Ensure the child was inserted in the right place
+ self.assertEqual(len(elem), 4)
+ children.insert(1, child)
+ for idx in range(len(elem)):
+ self.assertEqual(children[idx], elem[idx])
+ self.assertIn(children[idx].tag, elem)
+ self.assertEqual(elem[children[idx].tag], children[idx])
+
+ # Ensure that multiple children of the same name are rejected
+ child2 = xmlutil.TemplateElement('child2')
+ self.assertRaises(KeyError, elem.insert, 2, child2)
+
+ def test_element_remove_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Create a test child to remove
+ child = xmlutil.TemplateElement('child2')
+
+ # Try to remove it
+ self.assertRaises(ValueError, elem.remove, child)
+
+ # Ensure that no child was removed
+ self.assertEqual(len(elem), 3)
+
+ # Now remove a legitimate child
+ elem.remove(children[1])
+
+ # Ensure that the child was removed
+ self.assertEqual(len(elem), 2)
+ self.assertEqual(elem[0], children[0])
+ self.assertEqual(elem[1], children[2])
+ self.assertEqual('child2' in elem, False)
+
+ # Ensure the child cannot be retrieved by name
+ def get_key(elem, key):
+ return elem[key]
+ self.assertRaises(KeyError, get_key, elem, 'child2')
+
+ def test_element_text(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Ensure that it has no text
+ self.assertIsNone(elem.text)
+
+ # Try setting it to a string and ensure it becomes a selector
+ elem.text = 'test'
+ self.assertEqual(hasattr(elem.text, 'chain'), True)
+ self.assertEqual(len(elem.text.chain), 1)
+ self.assertEqual(elem.text.chain[0], 'test')
+
+ # Try resetting the text to None
+ elem.text = None
+ self.assertIsNone(elem.text)
+
+ # Now make up a selector and try setting the text to that
+ sel = xmlutil.Selector()
+ elem.text = sel
+ self.assertEqual(elem.text, sel)
+
+ # Finally, try deleting the text and see what happens
+ del elem.text
+ self.assertIsNone(elem.text)
+
+ def test_apply_attrs(self):
+ # Create a template element
+ attrs = dict(attr1=xmlutil.ConstantSelector(1),
+ attr2=xmlutil.ConstantSelector(2))
+ tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
+
+ # Create an etree element
+ elem = etree.Element('test')
+
+ # Apply the template to the element
+ tmpl_elem.apply(elem, None)
+
+ # Now, verify the correct attributes were set
+ for k, v in elem.items():
+ self.assertEqual(str(attrs[k].value), v)
+
+ def test_apply_text(self):
+ # Create a template element
+ tmpl_elem = xmlutil.TemplateElement('test')
+ tmpl_elem.text = xmlutil.ConstantSelector(1)
+
+ # Create an etree element
+ elem = etree.Element('test')
+
+ # Apply the template to the element
+ tmpl_elem.apply(elem, None)
+
+ # Now, verify the text was set
+ self.assertEqual(str(tmpl_elem.text.value), elem.text)
+
+ def test__render(self):
+ attrs = dict(attr1=xmlutil.ConstantSelector(1),
+ attr2=xmlutil.ConstantSelector(2),
+ attr3=xmlutil.ConstantSelector(3))
+
+ # Create a master template element
+ master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
+
+        # Create a couple of slave template elements
+ slave_elems = [
+ xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+ xmlutil.TemplateElement('test', attr3=attrs['attr3']),
+ ]
+
+ # Try the render
+ elem = master_elem._render(None, None, slave_elems, None)
+
+ # Verify the particulars of the render
+ self.assertEqual(elem.tag, 'test')
+ self.assertEqual(len(elem.nsmap), 0)
+ for k, v in elem.items():
+ self.assertEqual(str(attrs[k].value), v)
+
+ # Create a parent for the element to be rendered
+ parent = etree.Element('parent')
+
+ # Try the render again...
+ elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
+
+ # Verify the particulars of the render
+ self.assertEqual(len(parent), 1)
+ self.assertEqual(parent[0], elem)
+ self.assertEqual(len(elem.nsmap), 1)
+ self.assertEqual(elem.nsmap['a'], 'foo')
+
+ def test_render(self):
+ # Create a template element
+ tmpl_elem = xmlutil.TemplateElement('test')
+ tmpl_elem.text = xmlutil.Selector()
+
+ # Create the object we're going to render
+ obj = ['elem1', 'elem2', 'elem3', 'elem4']
+
+ # Try a render with no object
+ elems = tmpl_elem.render(None, None)
+ self.assertEqual(len(elems), 0)
+
+ # Try a render with one object
+ elems = tmpl_elem.render(None, 'foo')
+ self.assertEqual(len(elems), 1)
+ self.assertEqual(elems[0][0].text, 'foo')
+ self.assertEqual(elems[0][1], 'foo')
+
+ # Now, try rendering an object with multiple entries
+ parent = etree.Element('parent')
+ elems = tmpl_elem.render(parent, obj)
+ self.assertEqual(len(elems), 4)
+
+ # Check the results
+ for idx in range(len(obj)):
+ self.assertEqual(elems[idx][0].text, obj[idx])
+ self.assertEqual(elems[idx][1], obj[idx])
+
+ # Check with a subselector
+ tmpl_elem = xmlutil.TemplateElement(
+ 'test',
+ subselector=xmlutil.ConstantSelector('foo'))
+ parent = etree.Element('parent')
+
+        # Render the full object through the subselector
+ elems = tmpl_elem.render(parent, obj)
+ self.assertEqual(len(elems), 4)
+
+ def test_subelement(self):
+ # Try the SubTemplateElement constructor
+ parent = xmlutil.SubTemplateElement(None, 'parent')
+ self.assertEqual(parent.tag, 'parent')
+ self.assertEqual(len(parent), 0)
+
+ # Now try it with a parent element
+ child = xmlutil.SubTemplateElement(parent, 'child')
+ self.assertEqual(child.tag, 'child')
+ self.assertEqual(len(parent), 1)
+ self.assertEqual(parent[0], child)
+
+ def test_wrap(self):
+ # These are strange methods, but they make things easier
+ elem = xmlutil.TemplateElement('test')
+ self.assertEqual(elem.unwrap(), elem)
+ self.assertEqual(elem.wrap().root, elem)
+
+ def test_dyntag(self):
+ obj = ['a', 'b', 'c']
+
+ # Create a template element with a dynamic tag
+ tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
+
+ # Try the render
+ parent = etree.Element('parent')
+ elems = tmpl_elem.render(parent, obj)
+
+ # Verify the particulars of the render
+ self.assertEqual(len(elems), len(obj))
+ for idx in range(len(obj)):
+ self.assertEqual(elems[idx][0].tag, obj[idx])
+
+ def test_tree(self):
+ # Create a template element
+ elem = xmlutil.TemplateElement('test', attr3='attr3')
+ elem.text = 'test'
+ self.assertEqual(elem.tree(),
+ "<test !selector=Selector() "
+ "!text=Selector('test',) "
+ "attr3=Selector('attr3',)"
+ "/>")
+
+ # Create a template element
+ elem = xmlutil.TemplateElement('test2')
+
+ # Create a child element
+ child = xmlutil.TemplateElement('child')
+
+ # Append the child to the parent
+ elem.append(child)
+
+ self.assertEqual(elem.tree(),
+ "<test2 !selector=Selector()>"
+ "<child !selector=Selector()/></test2>")
+
+
+class TemplateTest(test.NoDBTestCase):
+ def test_tree(self):
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+ self.assertTrue(tmpl.tree())
+
+ def test_wrap(self):
+ # These are strange methods, but they make things easier
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+ self.assertEqual(tmpl.unwrap(), elem)
+ self.assertEqual(tmpl.wrap(), tmpl)
+
+ def test__siblings(self):
+ # Set up a basic template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+
+ # Check that we get the right siblings
+ siblings = tmpl._siblings()
+ self.assertEqual(len(siblings), 1)
+ self.assertEqual(siblings[0], elem)
+
+ def test__nsmap(self):
+ # Set up a basic template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
+
+        # Check that we get the right namespace dictionary
+ nsmap = tmpl._nsmap()
+ self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
+ self.assertEqual(len(nsmap), 1)
+ self.assertEqual(nsmap['a'], 'foo')
+
+ def test_master_attach(self):
+ # Set up a master template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.MasterTemplate(elem, 1)
+
+ # Make sure it has a root but no slaves
+ self.assertEqual(tmpl.root, elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+ self.assertTrue(repr(tmpl))
+
+ # Try to attach an invalid slave
+ bad_elem = xmlutil.TemplateElement('test2')
+ self.assertRaises(ValueError, tmpl.attach, bad_elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Try to attach an invalid and a valid slave
+ good_elem = xmlutil.TemplateElement('test')
+ self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Try to attach an inapplicable template
+ class InapplicableTemplate(xmlutil.Template):
+ def apply(self, master):
+ return False
+ inapp_tmpl = InapplicableTemplate(good_elem)
+ tmpl.attach(inapp_tmpl)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Now try attaching an applicable template
+ tmpl.attach(good_elem)
+ self.assertEqual(len(tmpl.slaves), 1)
+ self.assertEqual(tmpl.slaves[0].root, good_elem)
+
+ def test_master_copy(self):
+ # Construct a master template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
+
+ # Give it a slave
+ slave = xmlutil.TemplateElement('test')
+ tmpl.attach(slave)
+
+ # Construct a copy
+ copy = tmpl.copy()
+
+ # Check to see if we actually managed a copy
+ self.assertNotEqual(tmpl, copy)
+ self.assertEqual(tmpl.root, copy.root)
+ self.assertEqual(tmpl.version, copy.version)
+ self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
+ self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
+ self.assertEqual(len(tmpl.slaves), len(copy.slaves))
+ self.assertEqual(tmpl.slaves[0], copy.slaves[0])
+
+ def test_slave_apply(self):
+ # Construct a master template
+ elem = xmlutil.TemplateElement('test')
+ master = xmlutil.MasterTemplate(elem, 3)
+
+ # Construct a slave template with applicable minimum version
+ slave = xmlutil.SlaveTemplate(elem, 2)
+ self.assertEqual(slave.apply(master), True)
+ self.assertTrue(repr(slave))
+
+ # Construct a slave template with equal minimum version
+ slave = xmlutil.SlaveTemplate(elem, 3)
+ self.assertEqual(slave.apply(master), True)
+
+ # Construct a slave template with inapplicable minimum version
+ slave = xmlutil.SlaveTemplate(elem, 4)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with applicable version range
+ slave = xmlutil.SlaveTemplate(elem, 2, 4)
+ self.assertEqual(slave.apply(master), True)
+
+ # Construct a slave template with low version range
+ slave = xmlutil.SlaveTemplate(elem, 1, 2)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with high version range
+ slave = xmlutil.SlaveTemplate(elem, 4, 5)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with matching version range
+ slave = xmlutil.SlaveTemplate(elem, 3, 3)
+ self.assertEqual(slave.apply(master), True)
+
+ def test__serialize(self):
+ # Our test object to serialize
+ obj = {
+ 'test': {
+ 'name': 'foobar',
+ 'values': [1, 2, 3, 4],
+ 'attrs': {
+ 'a': 1,
+ 'b': 2,
+ 'c': 3,
+ 'd': 4,
+ },
+ 'image': {
+ 'name': 'image_foobar',
+ 'id': 42,
+ },
+ },
+ }
+
+ # Set up our master template
+ root = xmlutil.TemplateElement('test', selector='test',
+ name='name')
+ value = xmlutil.SubTemplateElement(root, 'value', selector='values')
+ value.text = xmlutil.Selector()
+ attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
+ xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
+ key=0, value=1)
+ master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
+
+ # Set up our slave template
+ root_slave = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_slave, 'image',
+ selector='image', id='id')
+ image.text = xmlutil.Selector('name')
+ slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
+
+ # Attach the slave to the master...
+ master.attach(slave)
+
+ # Try serializing our object
+ siblings = master._siblings()
+ nsmap = master._nsmap()
+ result = master._serialize(None, obj, siblings, nsmap)
+
+ # Now we get to manually walk the element tree...
+ self.assertEqual(result.tag, 'test')
+ self.assertEqual(len(result.nsmap), 2)
+ self.assertEqual(result.nsmap['f'], 'foo')
+ self.assertEqual(result.nsmap['b'], 'bar')
+ self.assertEqual(result.get('name'), obj['test']['name'])
+ for idx, val in enumerate(obj['test']['values']):
+ self.assertEqual(result[idx].tag, 'value')
+ self.assertEqual(result[idx].text, str(val))
+ idx += 1
+ self.assertEqual(result[idx].tag, 'attrs')
+ for attr in result[idx]:
+ self.assertEqual(attr.tag, 'attr')
+ self.assertEqual(attr.get('value'),
+ str(obj['test']['attrs'][attr.get('key')]))
+ idx += 1
+ self.assertEqual(result[idx].tag, 'image')
+ self.assertEqual(result[idx].get('id'),
+ str(obj['test']['image']['id']))
+ self.assertEqual(result[idx].text, obj['test']['image']['name'])
+
+ templ = xmlutil.Template(None)
+ self.assertEqual(templ.serialize(None), '')
+
+ def test_serialize_with_colon_tagname_support(self):
+ # Our test object to serialize
+ obj = {'extra_specs': {'foo:bar': '999'}}
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+ '</extra_specs>'))
+ # Set up our master template
+ root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
+ colon_ns=True)
+ value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
+ colon_ns=True)
+ value.text = xmlutil.Selector()
+ master = xmlutil.MasterTemplate(root, 1)
+ result = master.serialize(obj)
+ self.assertEqual(expected_xml, result)
+
+ def test__serialize_with_empty_datum_selector(self):
+ # Our test object to serialize
+ obj = {
+ 'test': {
+ 'name': 'foobar',
+ 'image': ''
+ },
+ }
+
+ root = xmlutil.TemplateElement('test', selector='test',
+ name='name')
+ master = xmlutil.MasterTemplate(root, 1)
+ root_slave = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_slave, 'image',
+ selector='image')
+ image.set('id')
+ xmlutil.make_links(image, 'links')
+ slave = xmlutil.SlaveTemplate(root_slave, 1)
+ master.attach(slave)
+
+ siblings = master._siblings()
+ result = master._serialize(None, obj, siblings)
+ self.assertEqual(result.tag, 'test')
+ self.assertEqual(result[0].tag, 'image')
+ self.assertEqual(result[0].get('id'), str(obj['test']['image']))
+
+
+class MasterTemplateBuilder(xmlutil.TemplateBuilder):
+ def construct(self):
+ elem = xmlutil.TemplateElement('test')
+ return xmlutil.MasterTemplate(elem, 1)
+
+
+class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
+ def construct(self):
+ elem = xmlutil.TemplateElement('test')
+ return xmlutil.SlaveTemplate(elem, 1)
+
+
+class TemplateBuilderTest(test.NoDBTestCase):
+ def test_master_template_builder(self):
+ # Make sure the template hasn't been built yet
+ self.assertIsNone(MasterTemplateBuilder._tmpl)
+
+ # Now, construct the template
+ tmpl1 = MasterTemplateBuilder()
+
+ # Make sure that there is a template cached...
+ self.assertIsNotNone(MasterTemplateBuilder._tmpl)
+
+ # Make sure it wasn't what was returned...
+ self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure it doesn't get rebuilt
+ cached = MasterTemplateBuilder._tmpl
+ tmpl2 = MasterTemplateBuilder()
+ self.assertEqual(MasterTemplateBuilder._tmpl, cached)
+
+ # Make sure we're always getting fresh copies
+ self.assertNotEqual(tmpl1, tmpl2)
+
+ # Make sure we can override the copying behavior
+ tmpl3 = MasterTemplateBuilder(False)
+ self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
+
+ def test_slave_template_builder(self):
+ # Make sure the template hasn't been built yet
+ self.assertIsNone(SlaveTemplateBuilder._tmpl)
+
+ # Now, construct the template
+ tmpl1 = SlaveTemplateBuilder()
+
+ # Make sure there is a template cached...
+ self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
+
+ # Make sure it was what was returned...
+ self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure it doesn't get rebuilt
+ tmpl2 = SlaveTemplateBuilder()
+ self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure we're always getting the cached copy
+ self.assertEqual(tmpl1, tmpl2)
+
+
+class MiscellaneousXMLUtilTests(test.NoDBTestCase):
+ def test_validate_schema(self):
+ xml = '''<?xml version='1.0' encoding='UTF-8'?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+<meta key="key6">value6</meta><meta key="key4">value4</meta>
+</metadata>
+'''
+ xmlutil.validate_schema(xml, 'metadata')
+ # No way to test the return value of validate_schema.
+ # It just raises an exception when something is wrong.
+ self.assertTrue(True)
+
+ def test_make_links(self):
+ elem = xmlutil.TemplateElement('image', selector='image')
+ self.assertTrue(repr(xmlutil.make_links(elem, 'links')))
+
+ def test_make_flat_dict(self):
+ expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<wrapper><a>foo</a><b>bar</b></wrapper>')
+ root = xmlutil.make_flat_dict('wrapper')
+ tmpl = xmlutil.MasterTemplate(root, 1)
+ result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
+ self.assertEqual(result, expected_xml)
+
+ expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+'<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
+"</ns0:wrapper>")
+ root = xmlutil.make_flat_dict('wrapper', ns='ns')
+ tmpl = xmlutil.MasterTemplate(root, 1)
+ result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
+ self.assertEqual(result, expected_xml)
+
+ def test_make_flat_dict_with_colon_tagname_support(self):
+ # Our test object to serialize
+ obj = {'extra_specs': {'foo:bar': '999'}}
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+ '</extra_specs>'))
+ # Set up our master template
+ root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
+ master = xmlutil.MasterTemplate(root, 1)
+ result = master.serialize(obj)
+ self.assertEqual(expected_xml, result)
+
+ def test_make_flat_dict_with_parent(self):
+ # Our test object to serialize
+ obj = {"device": {"id": 1,
+ "extra_info": {"key1": "value1",
+ "key2": "value2"}}}
+
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<device id="1"><extra_info><key2>value2</key2>'
+ '<key1>value1</key1></extra_info></device>'))
+
+ root = xmlutil.TemplateElement('device', selector='device')
+ root.set('id')
+ extra = xmlutil.make_flat_dict('extra_info', root=root)
+ root.append(extra)
+ master = xmlutil.MasterTemplate(root, 1)
+ result = master.serialize(obj)
+ self.assertEqual(expected_xml, result)
+
+ def test_make_flat_dict_with_dicts(self):
+ # Our test object to serialize
+ obj = {"device": {"id": 1,
+ "extra_info": {"key1": "value1",
+ "key2": "value2"}}}
+
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<device><id>1</id><extra_info><key2>value2</key2>'
+ '<key1>value1</key1></extra_info></device>'))
+
+ root = xmlutil.make_flat_dict('device', selector='device',
+ ignore_sub_dicts=True)
+ extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
+ root.append(extra)
+ master = xmlutil.MasterTemplate(root, 1)
+ result = master.serialize(obj)
+ self.assertEqual(expected_xml, result)
+
+ def test_safe_parse_xml(self):
+
+ normal_body = ('<?xml version="1.0" ?>'
+ '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
+
+ dom = xmlutil.safe_minidom_parse_string(normal_body)
+ # Some versions of minidom inject extra newlines so we ignore them
+ result = str(dom.toxml()).replace('\n', '')
+ self.assertEqual(normal_body, result)
+
+ self.assertRaises(exception.MalformedRequestBody,
+ xmlutil.safe_minidom_parse_string,
+ tests_utils.killer_xml_body())
+
+
+class SafeParserTestCase(test.NoDBTestCase):
+ def test_external_dtd(self):
+ xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+ <html>
+ <head/>
+ <body>html with dtd</body>
+ </html>""")
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_external_file(self):
+ xml_string = """<!DOCTYPE external [
+ <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
+ ]>
+ <root>&ee;</root>"""
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
+
+ def test_notation(self):
+ xml_string = """<?xml version="1.0" standalone="no"?>
+ <!-- comment data -->
+ <!DOCTYPE x [
+ <!NOTATION notation SYSTEM "notation.jpeg">
+ ]>
+ <root attr1="value1">
+ </root>"""
+
+ parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+ forbid_entities=True)
+ self.assertRaises(ValueError,
+ minidom.parseString,
+ xml_string, parser)
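The SafeParserTestCase tests above only exercise xmlutil.ProtectedExpatParser from the outside. As a hedged sketch of the technique they rely on (an expat-based SAX parser that raises ValueError when it encounters DTDs, entity declarations, external entity references, or notations), something along the following lines would satisfy these tests; the class name and exact handler set are illustrative assumptions, not a claim about Nova's actual implementation.

    # Sketch only: reject DTD/entity constructs during SAX parsing so that
    # minidom.parseString(xml_string, parser) fails fast on hostile input.
    from xml.sax import expatreader


    class ForbiddingExpatParser(expatreader.ExpatParser):
        def __init__(self, forbid_dtd=True, forbid_entities=True,
                     *args, **kwargs):
            # expatreader.ExpatParser is an old-style class on Python 2,
            # so call __init__ directly instead of using super().
            expatreader.ExpatParser.__init__(self, *args, **kwargs)
            self.forbid_dtd = forbid_dtd
            self.forbid_entities = forbid_entities

        def _reject(self, *args, **kwargs):
            raise ValueError("DTD/entity declarations are forbidden")

        def reset(self):
            expatreader.ExpatParser.reset(self)
            if self.forbid_dtd:
                self._parser.StartDoctypeDeclHandler = self._reject
            if self.forbid_entities:
                self._parser.EntityDeclHandler = self._reject
                self._parser.UnparsedEntityDeclHandler = self._reject
                self._parser.ExternalEntityRefHandler = self._reject
                self._parser.NotationDeclHandler = self._reject

Used the same way as in the tests: minidom.parseString(xml_string, ForbiddingExpatParser(forbid_dtd=False, forbid_entities=True)) raises ValueError for the external-DTD, external-entity, and notation documents.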
diff --git a/nova/tests/unit/api/test_auth.py b/nova/tests/unit/api/test_auth.py
new file mode 100644
index 0000000000..e11c611b3a
--- /dev/null
+++ b/nova/tests/unit/api/test_auth.py
@@ -0,0 +1,214 @@
+# Copyright (c) 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.middleware import request_id
+from oslo.serialization import jsonutils
+import webob
+import webob.exc
+
+import nova.api.auth
+from nova.i18n import _
+from nova import test
+
+CONF = cfg.CONF
+
+
+class TestNovaKeystoneContextMiddleware(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNovaKeystoneContextMiddleware, self).setUp()
+
+ @webob.dec.wsgify()
+ def fake_app(req):
+ self.context = req.environ['nova.context']
+ return webob.Response()
+
+ self.context = None
+ self.middleware = nova.api.auth.NovaKeystoneContext(fake_app)
+ self.request = webob.Request.blank('/')
+ self.request.headers['X_TENANT_ID'] = 'testtenantid'
+ self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
+ self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
+
+ def test_no_user_or_user_id(self):
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '401 Unauthorized')
+
+ def test_user_id_only(self):
+ self.request.headers['X_USER_ID'] = 'testuserid'
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 OK')
+ self.assertEqual(self.context.user_id, 'testuserid')
+
+ def test_user_only(self):
+ self.request.headers['X_USER'] = 'testuser'
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 OK')
+ self.assertEqual(self.context.user_id, 'testuser')
+
+ def test_user_id_trumps_user(self):
+ self.request.headers['X_USER_ID'] = 'testuserid'
+ self.request.headers['X_USER'] = 'testuser'
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 OK')
+ self.assertEqual(self.context.user_id, 'testuserid')
+
+ def test_invalid_service_catalog(self):
+ self.request.headers['X_USER'] = 'testuser'
+ self.request.headers['X_SERVICE_CATALOG'] = "bad json"
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '500 Internal Server Error')
+
+ def test_request_id_extracted_from_env(self):
+ req_id = 'dummy-request-id'
+ self.request.headers['X_PROJECT_ID'] = 'testtenantid'
+ self.request.headers['X_USER_ID'] = 'testuserid'
+ self.request.environ[request_id.ENV_REQUEST_ID] = req_id
+ self.request.get_response(self.middleware)
+ self.assertEqual(req_id, self.context.request_id)
+
+
+class TestKeystoneMiddlewareRoles(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestKeystoneMiddlewareRoles, self).setUp()
+
+ @webob.dec.wsgify()
+ def role_check_app(req):
+ context = req.environ['nova.context']
+
+ if "knight" in context.roles and "bad" not in context.roles:
+ return webob.Response(status="200 Role Match")
+ elif context.roles == ['']:
+ return webob.Response(status="200 No Roles")
+ else:
+ raise webob.exc.HTTPBadRequest(_("unexpected role header"))
+
+ self.middleware = nova.api.auth.NovaKeystoneContext(role_check_app)
+ self.request = webob.Request.blank('/')
+ self.request.headers['X_USER'] = 'testuser'
+ self.request.headers['X_TENANT_ID'] = 'testtenantid'
+ self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
+ self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
+
+ self.roles = "pawn, knight, rook"
+
+ def test_roles(self):
+ # Test that the newer style role header takes precedence.
+ self.request.headers['X_ROLES'] = 'pawn,knight,rook'
+ self.request.headers['X_ROLE'] = 'bad'
+
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 Role Match')
+
+ def test_roles_empty(self):
+ self.request.headers['X_ROLES'] = ''
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 No Roles')
+
+ def test_deprecated_role(self):
+ # Test fallback to older role header.
+ self.request.headers['X_ROLE'] = 'pawn,knight,rook'
+
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 Role Match')
+
+ def test_role_empty(self):
+ self.request.headers['X_ROLE'] = ''
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 No Roles')
+
+ def test_no_role_headers(self):
+ # Test with no role headers set.
+
+ response = self.request.get_response(self.middleware)
+ self.assertEqual(response.status, '200 No Roles')
+
+
+class TestPipeLineFactory(test.NoDBTestCase):
+
+ class FakeFilter(object):
+ def __init__(self, name):
+ self.name = name
+ self.obj = None
+
+ def __call__(self, obj):
+ self.obj = obj
+ return self
+
+ class FakeApp(object):
+ def __init__(self, name):
+ self.name = name
+
+ class FakeLoader(object):
+ def get_filter(self, name):
+ return TestPipeLineFactory.FakeFilter(name)
+
+ def get_app(self, name):
+ return TestPipeLineFactory.FakeApp(name)
+
+ def _test_pipeline(self, pipeline, app):
+ for p in pipeline.split()[:-1]:
+ self.assertEqual(app.name, p)
+ self.assertIsInstance(app, TestPipeLineFactory.FakeFilter)
+ app = app.obj
+ self.assertEqual(app.name, pipeline.split()[-1])
+ self.assertIsInstance(app, TestPipeLineFactory.FakeApp)
+
+ def test_pipeline_factory(self):
+ fake_pipeline = 'test1 test2 test3'
+ app = nova.api.auth.pipeline_factory(
+ TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
+ self._test_pipeline(fake_pipeline, app)
+
+ def test_pipeline_factory_v21(self):
+ fake_pipeline = 'test1 test2 test3'
+ app = nova.api.auth.pipeline_factory_v21(
+ TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
+ self._test_pipeline(fake_pipeline, app)
+
+ def test_pipeline_factory_with_rate_limits(self):
+ CONF.set_override('api_rate_limit', True)
+ CONF.set_override('auth_strategy', 'keystone')
+ fake_pipeline = 'test1 test2 test3'
+ app = nova.api.auth.pipeline_factory(
+ TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
+ self._test_pipeline(fake_pipeline, app)
+
+ def test_pipeline_factory_without_rate_limits(self):
+ CONF.set_override('auth_strategy', 'keystone')
+ fake_pipeline1 = 'test1 test2 test3'
+ fake_pipeline2 = 'test4 test5 test6'
+ app = nova.api.auth.pipeline_factory(
+ TestPipeLineFactory.FakeLoader(), None,
+ keystone_nolimit=fake_pipeline1,
+ keystone=fake_pipeline2)
+ self._test_pipeline(fake_pipeline1, app)
+
+ def test_pipeline_factory_missing_nolimits_pipeline(self):
+ CONF.set_override('api_rate_limit', False)
+ CONF.set_override('auth_strategy', 'keystone')
+ fake_pipeline = 'test1 test2 test3'
+ app = nova.api.auth.pipeline_factory(
+ TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
+ self._test_pipeline(fake_pipeline, app)
+
+ def test_pipeline_factory_compatibility_with_v3(self):
+ CONF.set_override('api_rate_limit', True)
+ CONF.set_override('auth_strategy', 'keystone')
+ fake_pipeline = 'test1 ratelimit_v3 test3'
+ app = nova.api.auth.pipeline_factory(
+ TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
+ self._test_pipeline('test1 test3', app)
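The TestPipeLineFactory tests drive nova.api.auth.pipeline_factory purely through FakeLoader/FakeFilter/FakeApp, so the pattern being asserted is easy to miss: the last space-separated name in the chosen pipeline becomes the WSGI app, and every preceding name becomes a filter wrapped around it. A minimal sketch of that paste-deploy wrapping pattern (illustrative only, not Nova's code) is:

    # Sketch: build a WSGI stack from a space-separated pipeline string.
    def build_pipeline(loader, pipeline):
        names = pipeline.split()
        app = loader.get_app(names[-1])          # last name is the app
        for name in reversed(names[:-1]):        # wrap filters outermost-first
            app = loader.get_filter(name)(app)
        return app

With a loader like TestPipeLineFactory.FakeLoader, build_pipeline(loader, 'test1 test2 test3') yields FakeFilter('test1') wrapping FakeFilter('test2') wrapping FakeApp('test3'), which is exactly the shape that _test_pipeline() walks and asserts.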
diff --git a/nova/tests/unit/api/test_compute_req_id.py b/nova/tests/unit/api/test_compute_req_id.py
new file mode 100644
index 0000000000..bbdbfab726
--- /dev/null
+++ b/nova/tests/unit/api/test_compute_req_id.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from testtools import matchers
+import webob
+import webob.dec
+
+from nova.api import compute_req_id
+from nova.openstack.common import context
+from nova import test
+
+
+class RequestIdTest(test.TestCase):
+ def test_generate_request_id(self):
+ @webob.dec.wsgify
+ def application(req):
+ return req.environ[compute_req_id.ENV_REQUEST_ID]
+
+ app = compute_req_id.ComputeReqIdMiddleware(application)
+ req = webob.Request.blank('/test')
+ req_id = context.generate_request_id()
+ req.environ[compute_req_id.ENV_REQUEST_ID] = req_id
+ res = req.get_response(app)
+
+ res_id = res.headers.get(compute_req_id.HTTP_RESP_HEADER_REQUEST_ID)
+ self.assertThat(res_id, matchers.StartsWith('req-'))
+ self.assertEqual(res_id, res.body)
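RequestIdTest checks two behaviours: the value placed in the WSGI environment starts with 'req-', and the middleware copies it into a response header. A rough sketch of middleware with that behaviour (the constant values and class body are assumptions for illustration, not the real nova.api.compute_req_id module) might look like:

    import uuid

    import webob.dec

    # Assumed placeholder names; the real module defines its own constants.
    ENV_REQUEST_ID = 'openstack.request_id'
    HTTP_RESP_HEADER_REQUEST_ID = 'x-compute-request-id'


    class RequestIdMiddleware(object):
        def __init__(self, application):
            self.application = application

        @webob.dec.wsgify
        def __call__(self, req):
            # Reuse an id already set by an outer middleware, else mint one.
            req_id = req.environ.setdefault(ENV_REQUEST_ID,
                                            'req-' + str(uuid.uuid4()))
            response = req.get_response(self.application)
            response.headers[HTTP_RESP_HEADER_REQUEST_ID] = req_id
            return response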
diff --git a/nova/tests/unit/api/test_validator.py b/nova/tests/unit/api/test_validator.py
new file mode 100644
index 0000000000..e9e349194a
--- /dev/null
+++ b/nova/tests/unit/api/test_validator.py
@@ -0,0 +1,103 @@
+# Copyright 2011 Cloudscaling, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+
+from nova.api import validator
+from nova import test
+
+
+class ValidatorTestCase(test.NoDBTestCase):
+
+ def test_validate(self):
+ fixture = {
+ 'foo': lambda val: val is True
+ }
+
+ self.assertTrue(
+ validator.validate({'foo': True}, fixture))
+ self.assertFalse(
+ validator.validate({'foo': False}, fixture))
+
+ def test_only_tests_intersect(self):
+ """Test that validator.validate only tests the intersect of keys
+ from args and validator.
+ """
+
+ fixture = {
+ 'foo': lambda val: True,
+ 'bar': lambda val: True
+ }
+
+ self.assertTrue(
+ validator.validate({'foo': True}, fixture))
+ self.assertTrue(
+ validator.validate({'foo': True, 'bar': True}, fixture))
+ self.assertTrue(
+ validator.validate({'foo': True, 'bar': True, 'baz': True},
+ fixture))
+
+ def test_validate_str(self):
+ self.assertTrue(validator.validate_str()('foo'))
+ self.assertFalse(validator.validate_str()(1))
+ self.assertTrue(validator.validate_str(4)('foo'))
+ self.assertFalse(validator.validate_str(2)('foo'))
+ self.assertFalse(validator.validate_str()(None))
+ self.assertTrue(validator.validate_str()(u'foo'))
+
+ def test_validate_int(self):
+ self.assertTrue(validator.validate_int()(1))
+ self.assertFalse(validator.validate_int()('foo'))
+ self.assertTrue(validator.validate_int(100)(1))
+ self.assertFalse(validator.validate_int(4)(5))
+ self.assertFalse(validator.validate_int()(None))
+
+ def test_validate_ec2_id(self):
+ self.assertFalse(validator.validate_ec2_id('foobar'))
+ self.assertFalse(validator.validate_ec2_id(''))
+ self.assertFalse(validator.validate_ec2_id(1234))
+ self.assertTrue(validator.validate_ec2_id('i-284f3a41'))
+
+ def test_validate_url_path(self):
+ self.assertTrue(validator.validate_url_path('/path/to/file'))
+ self.assertFalse(validator.validate_url_path('path/to/file'))
+ self.assertFalse(
+ validator.validate_url_path('#this is not a path!@#$%^&*()')
+ )
+ self.assertFalse(validator.validate_url_path(None))
+ self.assertFalse(validator.validate_url_path(123))
+
+ def test_validate_image_path(self):
+ self.assertTrue(validator.validate_image_path('path/to/file'))
+ self.assertFalse(validator.validate_image_path('/path/to/file'))
+ self.assertFalse(validator.validate_image_path('path'))
+
+ def test_validate_user_data(self):
+ fixture = base64.b64encode('foo')
+ self.assertTrue(validator.validate_user_data(fixture))
+ self.assertFalse(validator.validate_user_data(False))
+ self.assertFalse(validator.validate_user_data('hello, world!'))
+
+ def test_default_validator(self):
+ expect_pass = {
+ 'attribute': 'foobar'
+ }
+ self.assertTrue(validator.validate(expect_pass,
+ validator.DEFAULT_VALIDATOR))
+ expect_fail = {
+ 'attribute': 0
+ }
+ self.assertFalse(validator.validate(expect_fail,
+ validator.DEFAULT_VALIDATOR))
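test_only_tests_intersect pins down a subtle property: validator.validate only applies checks for keys that appear in both the args and the validator mapping, and silently ignores everything else. A minimal sketch of that behaviour (an assumption for illustration, not the real nova.api.validator source) is:

    # Sketch: apply a check only when the key exists in both mappings.
    def validate(args, validator):
        for key, check in validator.items():
            if key not in args:
                continue            # keys absent from args are skipped
            if not check(args[key]):
                return False
        return True

For example, validate({'foo': True, 'baz': 1}, {'foo': lambda v: v is True}) returns True because 'baz' has no corresponding check and is ignored, matching the third assertion in test_only_tests_intersect.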
diff --git a/nova/tests/unit/api/test_wsgi.py b/nova/tests/unit/api/test_wsgi.py
new file mode 100644
index 0000000000..aecfb8e219
--- /dev/null
+++ b/nova/tests/unit/api/test_wsgi.py
@@ -0,0 +1,65 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test WSGI basics and provide some helper functions for other WSGI tests.
+"""
+
+from nova import test
+
+import routes
+import webob
+
+from nova import wsgi
+
+
+class Test(test.NoDBTestCase):
+
+ def test_debug(self):
+
+ class Application(wsgi.Application):
+ """Dummy application to test debug."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [("X-Test", "checking")])
+ return ['Test result']
+
+ application = wsgi.Debug(Application())
+ result = webob.Request.blank('/').get_response(application)
+ self.assertEqual(result.body, "Test result")
+
+ def test_router(self):
+
+ class Application(wsgi.Application):
+ """Test application to call from router."""
+
+ def __call__(self, environ, start_response):
+ start_response("200", [])
+ return ['Router result']
+
+ class Router(wsgi.Router):
+ """Test router."""
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.connect("/test", controller=Application())
+ super(Router, self).__init__(mapper)
+
+ result = webob.Request.blank('/test').get_response(Router())
+ self.assertEqual(result.body, "Router result")
+ result = webob.Request.blank('/bad').get_response(Router())
+ self.assertNotEqual(result.body, "Router result")
diff --git a/nova/tests/unit/bundle/1mb.manifest.xml b/nova/tests/unit/bundle/1mb.manifest.xml
new file mode 100644
index 0000000000..01648a5441
--- /dev/null
+++ b/nova/tests/unit/bundle/1mb.manifest.xml
@@ -0,0 +1 @@
+<?xml version="1.0" ?><manifest><version>2007-10-10</version><bundler><name>euca-tools</name><version>1.2</version><release>31337</release></bundler><machine_configuration><architecture>x86_64</architecture><kernel_id>aki-test</kernel_id><ramdisk_id>ari-test</ramdisk_id></machine_configuration><image><name>1mb</name><user>42</user><type>machine</type><digest algorithm="SHA1">da39a3ee5e6b4b0d3255bfef95601890afd80709</digest><size>1048576</size><bundled_size>1136</bundled_size><ec2_encrypted_key algorithm="AES-128-CBC">33a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d9915</ec2_encrypted_key><user_encrypted_key algorithm="AES-128-CBC">4c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa3023</user_encrypted_key><ec2_encrypted_iv>14bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb3</ec2_encrypted_iv><user_encrypted_iv>8f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b</user_encrypted_iv><parts count="2"><part index="0"><filename>1mb.part.0</filename><digest algorithm="SHA1">c4413423cf7a57e71187e19bfd5cd4b514a64283</digest></part><part index="1"><filename>1mb.part.1</filename><digest algorithm="SHA1">9d4262e6589393d09a11a0332af169887bc2e57d</digest></part></parts></image><signature>4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e</signature></manifest>
diff --git a/nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml b/nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
new file mode 100644
index 0000000000..73d7ace006
--- /dev/null
+++ b/nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
@@ -0,0 +1 @@
+<?xml version="1.0" ?><manifest><version>2007-10-10</version><bundler><name>euca-tools</name><version>1.2</version><release>31337</release></bundler><machine_configuration><architecture>x86_64</architecture></machine_configuration><image><name>1mb</name><user>42</user><type>machine</type><digest algorithm="SHA1">da39a3ee5e6b4b0d3255bfef95601890afd80709</digest><size>1048576</size><bundled_size>1136</bundled_size><ec2_encrypted_key algorithm="AES-128-CBC">33a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d9915</ec2_encrypted_key><user_encrypted_key algorithm="AES-128-CBC">4c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa3023</user_encrypted_key><ec2_encrypted_iv>14bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb3</ec2_encrypted_iv><user_encrypted_iv>8f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b</user_encrypted_iv><parts count="2"><part index="0"><filename>1mb.part.0</filename><digest algorithm="SHA1">c4413423cf7a57e71187e19bfd5cd4b514a64283</digest></part><part index="1"><filename>1mb.part.1</filename><digest algorithm="SHA1">9d4262e6589393d09a11a0332af169887bc2e57d</digest></part></parts></image><signature>4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e</signature></manifest>
diff --git a/nova/tests/unit/bundle/1mb.part.0 b/nova/tests/unit/bundle/1mb.part.0
new file mode 100644
index 0000000000..15a1657c57
--- /dev/null
+++ b/nova/tests/unit/bundle/1mb.part.0
Binary files differ
diff --git a/nova/tests/unit/bundle/1mb.part.1 b/nova/tests/unit/bundle/1mb.part.1
new file mode 100644
index 0000000000..2f0406e2d1
--- /dev/null
+++ b/nova/tests/unit/bundle/1mb.part.1
@@ -0,0 +1 @@
+­´ˆà«€ç‰°Ƴ ¡ÀiDHW̽×JÈ8ïrV¼³h§X’·@Yj“~Ø ·Gû5û 3Nt«˜•H6Ñ$§Ëgö™é Lá¢+³æ¤X†pm¬@,øŽ>7ÚÊ×užp¼ aü`¥V2X@£#ᶠ\ No newline at end of file
diff --git a/nova/tests/unit/cast_as_call.py b/nova/tests/unit/cast_as_call.py
new file mode 100644
index 0000000000..f75600a3b5
--- /dev/null
+++ b/nova/tests/unit/cast_as_call.py
@@ -0,0 +1,49 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from oslo import messaging
+
+
+class CastAsCall(fixtures.Fixture):
+
+ """Make RPC 'cast' behave like a 'call'.
+
+ This is a little hack for tests that need to know when a cast
+ operation has completed. The idea is that we wait for the RPC
+ endpoint method to complete and return before continuing in the
+ caller.
+
+ See Ia7f40718533e450f00cd3e7d753ac65755c70588 for more background.
+ """
+
+ def __init__(self, stubs):
+ super(CastAsCall, self).__init__()
+ self.stubs = stubs
+
+ @staticmethod
+ def _stub_out(stubs, obj):
+ orig_prepare = obj.prepare
+
+ def prepare(self, *args, **kwargs):
+ cctxt = orig_prepare(self, *args, **kwargs)
+ CastAsCall._stub_out(stubs, cctxt) # woo, recurse!
+ return cctxt
+
+ stubs.Set(obj, 'prepare', prepare)
+ stubs.Set(obj, 'cast', obj.call)
+
+ def setUp(self):
+ super(CastAsCall, self).setUp()
+ self._stub_out(self.stubs, messaging.RPCClient)
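For context, a typical way a test consumes this fixture (illustrative usage only, not part of the patch) is to install it in setUp so that every RPC cast made during the test blocks until the server-side method returns:

    from nova import test
    from nova.tests.unit import cast_as_call


    class ExampleCastTest(test.TestCase):
        def setUp(self):
            super(ExampleCastTest, self).setUp()
            # From here on, messaging.RPCClient.cast is routed through
            # .call, so assertions can run right after issuing a cast.
            self.useFixture(cast_as_call.CastAsCall(self.stubs))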
diff --git a/nova/tests/unit/cells/__init__.py b/nova/tests/unit/cells/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/cells/__init__.py
diff --git a/nova/tests/unit/cells/fakes.py b/nova/tests/unit/cells/fakes.py
new file mode 100644
index 0000000000..983e450262
--- /dev/null
+++ b/nova/tests/unit/cells/fakes.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Cells tests.
+"""
+
+from oslo.config import cfg
+
+from nova.cells import driver
+from nova.cells import manager as cells_manager
+from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
+import nova.db
+from nova.db import base
+from nova import exception
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+
+
+# Fake Cell Hierarchy
+FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
+FAKE_CELL_LAYOUT = [{'child-cell1': []},
+ {'child-cell2': [{'grandchild-cell1': []}]},
+ {'child-cell3': [{'grandchild-cell2': []},
+ {'grandchild-cell3': []}]},
+ {'child-cell4': []}]
+
+# _build_cell_stub_infos() below will take the above layout and create
+# a fake view of the DB from the perspective of each of the cells.
+# For each cell, a CellStubInfo will be created with this info.
+CELL_NAME_TO_STUB_INFO = {}
+
+
+class FakeDBApi(object):
+ """Cells uses a different DB in each cell. This means in order to
+ stub out things differently per cell, I need to create a fake DBApi
+ object that is instantiated by each fake cell.
+ """
+ def __init__(self, cell_db_entries):
+ self.cell_db_entries = cell_db_entries
+
+ def __getattr__(self, key):
+ return getattr(nova.db, key)
+
+ def cell_get_all(self, ctxt):
+ return self.cell_db_entries
+
+ def compute_node_get_all(self, ctxt):
+ return []
+
+ def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
+ return []
+
+ def instance_get_by_uuid(self, ctxt, instance_uuid):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+class FakeCellsDriver(driver.BaseCellsDriver):
+ pass
+
+
+class FakeCellState(cells_state.CellState):
+ def send_message(self, message):
+ message_runner = get_message_runner(self.name)
+ orig_ctxt = message.ctxt
+ json_message = message.to_json()
+ message = message_runner.message_from_json(json_message)
+ # Restore this so we can use mox and verify same context
+ message.ctxt = orig_ctxt
+ message.process()
+
+
+class FakeCellStateManager(cells_state.CellStateManagerDB):
+ def __init__(self, *args, **kwargs):
+ super(FakeCellStateManager, self).__init__(*args,
+ cell_state_cls=FakeCellState, **kwargs)
+
+
+class FakeCellsManager(cells_manager.CellsManager):
+ def __init__(self, *args, **kwargs):
+ super(FakeCellsManager, self).__init__(*args,
+ cell_state_manager=FakeCellStateManager,
+ **kwargs)
+
+
+class CellStubInfo(object):
+ def __init__(self, test_case, cell_name, db_entries):
+ self.test_case = test_case
+ self.cell_name = cell_name
+ self.db_entries = db_entries
+
+ def fake_base_init(_self, *args, **kwargs):
+ _self.db = FakeDBApi(db_entries)
+
+ test_case.stubs.Set(base.Base, '__init__', fake_base_init)
+ self.cells_manager = FakeCellsManager()
+ # Fix the cell name, as it normally uses CONF.cells.name
+ msg_runner = self.cells_manager.msg_runner
+ msg_runner.our_name = self.cell_name
+ self.cells_manager.state_manager.my_cell_state.name = self.cell_name
+
+
+def _build_cell_transport_url(cur_db_id):
+ username = 'username%s' % cur_db_id
+ password = 'password%s' % cur_db_id
+ hostname = 'rpc_host%s' % cur_db_id
+ port = 3090 + cur_db_id
+ virtual_host = 'rpc_vhost%s' % cur_db_id
+
+ return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port,
+ virtual_host)
+
+
+def _build_cell_stub_info(test_case, our_name, parent_path, children):
+ cell_db_entries = []
+ cur_db_id = 1
+ sep_char = cells_utils.PATH_CELL_SEP
+ if parent_path:
+ cell_db_entries.append(
+ dict(id=cur_db_id,
+ name=parent_path.split(sep_char)[-1],
+ is_parent=True,
+ transport_url=_build_cell_transport_url(cur_db_id)))
+ cur_db_id += 1
+ our_path = parent_path + sep_char + our_name
+ else:
+ our_path = our_name
+ for child in children:
+ for child_name, grandchildren in child.items():
+ _build_cell_stub_info(test_case, child_name, our_path,
+ grandchildren)
+ cell_entry = dict(id=cur_db_id,
+ name=child_name,
+ transport_url=_build_cell_transport_url(
+ cur_db_id),
+ is_parent=False)
+ cell_db_entries.append(cell_entry)
+ cur_db_id += 1
+ stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
+ CELL_NAME_TO_STUB_INFO[our_name] = stub_info
+
+
+def _build_cell_stub_infos(test_case):
+ _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
+ FAKE_CELL_LAYOUT)
+
+
+def init(test_case):
+ global CELL_NAME_TO_STUB_INFO
+ test_case.flags(driver='nova.tests.unit.cells.fakes.FakeCellsDriver',
+ group='cells')
+ CELL_NAME_TO_STUB_INFO = {}
+ _build_cell_stub_infos(test_case)
+
+
+def _get_cell_stub_info(cell_name):
+ return CELL_NAME_TO_STUB_INFO[cell_name]
+
+
+def get_state_manager(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager.state_manager
+
+
+def get_cell_state(cur_cell_name, tgt_cell_name):
+ state_manager = get_state_manager(cur_cell_name)
+ cell = state_manager.child_cells.get(tgt_cell_name)
+ if cell is None:
+ cell = state_manager.parent_cells.get(tgt_cell_name)
+ return cell
+
+
+def get_cells_manager(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager
+
+
+def get_message_runner(cell_name):
+ return _get_cell_stub_info(cell_name).cells_manager.msg_runner
+
+
+def stub_tgt_method(test_case, cell_name, method_name, method):
+ msg_runner = get_message_runner(cell_name)
+ tgt_msg_methods = msg_runner.methods_by_type['targeted']
+ setattr(tgt_msg_methods, method_name, method)
+
+
+def stub_bcast_method(test_case, cell_name, method_name, method):
+ msg_runner = get_message_runner(cell_name)
+ tgt_msg_methods = msg_runner.methods_by_type['broadcast']
+ setattr(tgt_msg_methods, method_name, method)
+
+
+def stub_bcast_methods(test_case, method_name, method):
+ for cell_name in CELL_NAME_TO_STUB_INFO.keys():
+ stub_bcast_method(test_case, cell_name, method_name, method)
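The module above is consumed by the cells test cases that follow; the usual wiring (mirroring what those tests actually do, shown here only as a usage summary) is to call fakes.init() in setUp and then pull per-cell helpers out of the fake hierarchy:

    from nova import test
    from nova.tests.unit.cells import fakes


    class ExampleCellsTest(test.NoDBTestCase):
        def setUp(self):
            super(ExampleCellsTest, self).setUp()
            fakes.init(self)  # build a fake DB view for every cell in the layout
            self.msg_runner = fakes.get_message_runner('api-cell')
            self.cells_manager = fakes.get_cells_manager('grandchild-cell1')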
diff --git a/nova/tests/unit/cells/test_cells_filters.py b/nova/tests/unit/cells/test_cells_filters.py
new file mode 100644
index 0000000000..0ae832f6c8
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_filters.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2012-2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for cells scheduler filters.
+"""
+
+from nova.cells import filters
+from nova import context
+from nova.db.sqlalchemy import models
+from nova import test
+from nova.tests.unit.cells import fakes
+
+
+class FiltersTestCase(test.NoDBTestCase):
+ """Makes sure the proper filters are in the directory."""
+
+ def test_all_filters(self):
+ filter_classes = filters.all_filters()
+ class_names = [cls.__name__ for cls in filter_classes]
+ self.assertIn("TargetCellFilter", class_names)
+
+
+class _FilterTestClass(test.NoDBTestCase):
+ """Base class for testing individual filter plugins."""
+ filter_cls_name = None
+
+ def setUp(self):
+ super(_FilterTestClass, self).setUp()
+ fakes.init(self)
+ self.msg_runner = fakes.get_message_runner('api-cell')
+ self.scheduler = self.msg_runner.scheduler
+ self.my_cell_state = self.msg_runner.state_manager.get_my_state()
+ self.filter_handler = filters.CellFilterHandler()
+ self.filter_classes = self.filter_handler.get_matching_classes(
+ [self.filter_cls_name])
+ self.context = context.RequestContext('fake', 'fake',
+ is_admin=True)
+
+ def _filter_cells(self, cells, filter_properties):
+ return self.filter_handler.get_filtered_objects(self.filter_classes,
+ cells,
+ filter_properties)
+
+
+class ImagePropertiesFilter(_FilterTestClass):
+ filter_cls_name = \
+ 'nova.cells.filters.image_properties.ImagePropertiesFilter'
+
+ def setUp(self):
+ super(ImagePropertiesFilter, self).setUp()
+ self.cell1 = models.Cell()
+ self.cell2 = models.Cell()
+ self.cell3 = models.Cell()
+ self.cells = [self.cell1, self.cell2, self.cell3]
+ for cell in self.cells:
+ cell.capabilities = {}
+ self.filter_props = {'context': self.context, 'request_spec': {}}
+
+ def test_missing_image_properties(self):
+ self.assertEqual(self.cells,
+ self._filter_cells(self.cells, self.filter_props))
+
+ def test_missing_hypervisor_version_requires(self):
+ self.filter_props['request_spec'] = {'image': {'properties': {}}}
+ for cell in self.cells:
+ cell.capabilities = {"prominent_hypervisor_version": set([u"6.2"])}
+ self.assertEqual(self.cells,
+ self._filter_cells(self.cells, self.filter_props))
+
+ def test_missing_hypervisor_version_in_cells(self):
+ image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
+ self.filter_props['request_spec'] = {'image': image}
+ self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
+ self.assertEqual(self.cells,
+ self._filter_cells(self.cells, self.filter_props))
+
+ def test_cells_matching_hypervisor_version(self):
+ image = {'properties': {'hypervisor_version_requires': '>6.0, <=6.3'}}
+ self.filter_props['request_spec'] = {'image': image}
+
+ self.cell1.capabilities = {"prominent_hypervisor_version":
+ set([u"6.2"])}
+ self.cell2.capabilities = {"prominent_hypervisor_version":
+ set([u"6.3"])}
+ self.cell3.capabilities = {"prominent_hypervisor_version":
+ set([u"6.0"])}
+
+ self.assertEqual([self.cell1, self.cell2],
+ self._filter_cells(self.cells, self.filter_props))
+
+ # assert again to verify filter doesn't mutate state
+ # LP bug #1325705
+ self.assertEqual([self.cell1, self.cell2],
+ self._filter_cells(self.cells, self.filter_props))
+
+
+class TestTargetCellFilter(_FilterTestClass):
+ filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter'
+
+ def test_missing_scheduler_hints(self):
+ cells = [1, 2, 3]
+ # No filtering
+ filter_props = {'context': self.context}
+ self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+ def test_no_target_cell_hint(self):
+ cells = [1, 2, 3]
+ filter_props = {'scheduler_hints': {},
+ 'context': self.context}
+ # No filtering
+ self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+ def test_target_cell_specified_me(self):
+ cells = [1, 2, 3]
+ target_cell = 'fake!cell!path'
+ current_cell = 'fake!cell!path'
+ filter_props = {'scheduler_hints': {'target_cell': target_cell},
+ 'routing_path': current_cell,
+ 'scheduler': self.scheduler,
+ 'context': self.context}
+ # Only myself in the list.
+ self.assertEqual([self.my_cell_state],
+ self._filter_cells(cells, filter_props))
+
+ def test_target_cell_specified_me_but_not_admin(self):
+ ctxt = context.RequestContext('fake', 'fake')
+ cells = [1, 2, 3]
+ target_cell = 'fake!cell!path'
+ current_cell = 'fake!cell!path'
+ filter_props = {'scheduler_hints': {'target_cell': target_cell},
+ 'routing_path': current_cell,
+ 'scheduler': self.scheduler,
+ 'context': ctxt}
+ # No filtering, because not an admin.
+ self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+ def test_target_cell_specified_not_me(self):
+ info = {}
+
+ def _fake_build_instances(ctxt, cell, sched_kwargs):
+ info['ctxt'] = ctxt
+ info['cell'] = cell
+ info['sched_kwargs'] = sched_kwargs
+
+ self.stubs.Set(self.msg_runner, 'build_instances',
+ _fake_build_instances)
+ cells = [1, 2, 3]
+ target_cell = 'fake!cell!path'
+ current_cell = 'not!the!same'
+ filter_props = {'scheduler_hints': {'target_cell': target_cell},
+ 'routing_path': current_cell,
+ 'scheduler': self.scheduler,
+ 'context': self.context,
+ 'host_sched_kwargs': 'meow'}
+ # None is returned to bypass further scheduling.
+ self.assertIsNone(self._filter_cells(cells, filter_props))
+ # The filter should have re-scheduled to the child cell itself.
+ expected_info = {'ctxt': self.context,
+ 'cell': 'fake!cell!path',
+ 'sched_kwargs': 'meow'}
+ self.assertEqual(expected_info, info)
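These tests exercise concrete filters through CellFilterHandler.get_filtered_objects; judging from how _filter_cells() calls it, a filter plugin receives the candidate cells plus filter_properties and returns the cells to keep (or None to short-circuit scheduling, as TargetCellFilter does). A hypothetical filter written against that interface, sketched here under the assumption that the base class and method are named BaseCellFilter.filter_all, could look like:

    from nova.cells import filters


    class ExampleCapabilityFilter(filters.BaseCellFilter):
        """Hypothetical filter: keep cells advertising a capability."""

        def filter_all(self, cells, filter_properties):
            # Cells without the (made-up) 'example_capable' capability
            # are dropped from scheduling consideration.
            return [cell for cell in cells
                    if getattr(cell, 'capabilities', {}).get('example_capable')]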
diff --git a/nova/tests/unit/cells/test_cells_manager.py b/nova/tests/unit/cells/test_cells_manager.py
new file mode 100644
index 0000000000..ca77abd1d2
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_manager.py
@@ -0,0 +1,808 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsManager
+"""
+import copy
+import datetime
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova import context
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_server_actions
+
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
+
+
+FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
+FAKE_SERVICES = [dict(id=1, host='host1',
+ compute_node=[FAKE_COMPUTE_NODES[0]]),
+ dict(id=2, host='host2',
+ compute_node=[FAKE_COMPUTE_NODES[1]]),
+ dict(id=3, host='host3', compute_node=[])]
+FAKE_TASK_LOGS = [dict(id=1, host='host1'),
+ dict(id=2, host='host2')]
+
+
+class CellsManagerClassTestCase(test.NoDBTestCase):
+ """Test case for CellsManager class."""
+
+ def setUp(self):
+ super(CellsManagerClassTestCase, self).setUp()
+ fakes.init(self)
+ # pick a child cell to use for tests.
+ self.our_cell = 'grandchild-cell1'
+ self.cells_manager = fakes.get_cells_manager(self.our_cell)
+ self.msg_runner = self.cells_manager.msg_runner
+ self.state_manager = fakes.get_state_manager(self.our_cell)
+ self.driver = self.cells_manager.driver
+ self.ctxt = 'fake_context'
+
+ def _get_fake_response(self, raw_response=None, exc=False):
+ if exc:
+ return messaging.Response('fake', test.TestingException(),
+ True)
+ if raw_response is None:
+ raw_response = 'fake-response'
+ return messaging.Response('fake', raw_response, False)
+
+ def test_get_cell_info_for_neighbors(self):
+ self.mox.StubOutWithMock(self.cells_manager.state_manager,
+ 'get_cell_info_for_neighbors')
+ self.cells_manager.state_manager.get_cell_info_for_neighbors()
+ self.mox.ReplayAll()
+ self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
+
+ def test_post_start_hook_child_cell(self):
+ self.mox.StubOutWithMock(self.driver, 'start_servers')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
+
+ self.driver.start_servers(self.msg_runner)
+ context.get_admin_context().AndReturn(self.ctxt)
+ self.cells_manager._update_our_parents(self.ctxt)
+ self.mox.ReplayAll()
+ self.cells_manager.post_start_hook()
+
+ def test_post_start_hook_middle_cell(self):
+ cells_manager = fakes.get_cells_manager('child-cell2')
+ msg_runner = cells_manager.msg_runner
+ driver = cells_manager.driver
+
+ self.mox.StubOutWithMock(driver, 'start_servers')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(msg_runner,
+ 'ask_children_for_capabilities')
+ self.mox.StubOutWithMock(msg_runner,
+ 'ask_children_for_capacities')
+
+ driver.start_servers(msg_runner)
+ context.get_admin_context().AndReturn(self.ctxt)
+ msg_runner.ask_children_for_capabilities(self.ctxt)
+ msg_runner.ask_children_for_capacities(self.ctxt)
+ self.mox.ReplayAll()
+ cells_manager.post_start_hook()
+
+ def test_update_our_parents(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'tell_parents_our_capabilities')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'tell_parents_our_capacities')
+
+ self.msg_runner.tell_parents_our_capabilities(self.ctxt)
+ self.msg_runner.tell_parents_our_capacities(self.ctxt)
+ self.mox.ReplayAll()
+ self.cells_manager._update_our_parents(self.ctxt)
+
+ def test_build_instances(self):
+ build_inst_kwargs = {'instances': [1, 2]}
+ self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
+ our_cell = self.msg_runner.state_manager.get_my_state()
+ self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
+ self.mox.ReplayAll()
+ self.cells_manager.build_instances(self.ctxt,
+ build_inst_kwargs=build_inst_kwargs)
+
+ def test_run_compute_api_method(self):
+ # Args should just be silently passed through
+ cell_name = 'fake-cell-name'
+ method_info = 'fake-method-info'
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'run_compute_api_method')
+ fake_response = self._get_fake_response()
+ self.msg_runner.run_compute_api_method(self.ctxt,
+ cell_name,
+ method_info,
+ True).AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.run_compute_api_method(
+ self.ctxt, cell_name=cell_name, method_info=method_info,
+ call=True)
+ self.assertEqual('fake-response', response)
+
+ def test_instance_update_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
+ self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_update_at_top(self.ctxt,
+ instance='fake-instance')
+
+ def test_instance_destroy_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
+ self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_destroy_at_top(self.ctxt,
+ instance='fake-instance')
+
+ def test_instance_delete_everywhere(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_delete_everywhere')
+ self.msg_runner.instance_delete_everywhere(self.ctxt,
+ 'fake-instance',
+ 'fake-type')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_delete_everywhere(
+ self.ctxt, instance='fake-instance',
+ delete_type='fake-type')
+
+ def test_instance_fault_create_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_fault_create_at_top')
+ self.msg_runner.instance_fault_create_at_top(self.ctxt,
+ 'fake-fault')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_fault_create_at_top(
+ self.ctxt, instance_fault='fake-fault')
+
+ def test_bw_usage_update_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'bw_usage_update_at_top')
+ self.msg_runner.bw_usage_update_at_top(self.ctxt,
+ 'fake-bw-info')
+ self.mox.ReplayAll()
+ self.cells_manager.bw_usage_update_at_top(
+ self.ctxt, bw_update_info='fake-bw-info')
+
+ def test_heal_instances(self):
+ self.flags(instance_updated_at_threshold=1000,
+ instance_update_num_instances=2,
+ group='cells')
+
+ fake_context = context.RequestContext('fake', 'fake')
+ stalled_time = timeutils.utcnow()
+ updated_since = stalled_time - datetime.timedelta(seconds=1000)
+
+ def utcnow():
+ return stalled_time
+
+ call_info = {'get_instances': 0, 'sync_instances': []}
+
+ instances = ['instance1', 'instance2', 'instance3']
+
+ def get_instances_to_sync(context, **kwargs):
+ self.assertEqual(context, fake_context)
+ call_info['shuffle'] = kwargs.get('shuffle')
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+ call_info['get_instances'] += 1
+ return iter(instances)
+
+ def instance_get_by_uuid(context, uuid):
+ return instances[int(uuid[-1]) - 1]
+
+ def sync_instance(context, instance):
+ self.assertEqual(context, fake_context)
+ call_info['sync_instances'].append(instance)
+
+ self.stubs.Set(cells_utils, 'get_instances_to_sync',
+ get_instances_to_sync)
+ self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
+ instance_get_by_uuid)
+ self.stubs.Set(self.cells_manager, '_sync_instance',
+ sync_instance)
+ self.stubs.Set(timeutils, 'utcnow', utcnow)
+
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertIsNone(call_info['project_id'])
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 1)
+ # Only first 2
+ self.assertEqual(call_info['sync_instances'],
+ instances[:2])
+
+ call_info['sync_instances'] = []
+ self.cells_manager._heal_instances(fake_context)
+ self.assertEqual(call_info['shuffle'], True)
+ self.assertIsNone(call_info['project_id'])
+ self.assertEqual(call_info['updated_since'], updated_since)
+ self.assertEqual(call_info['get_instances'], 2)
+ # Now the last 1 and the first 1
+ self.assertEqual(call_info['sync_instances'],
+ [instances[-1], instances[0]])
+
+ def test_sync_instances(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'sync_instances')
+ self.msg_runner.sync_instances(self.ctxt, 'fake-project',
+ 'fake-time', 'fake-deleted')
+ self.mox.ReplayAll()
+ self.cells_manager.sync_instances(self.ctxt,
+ project_id='fake-project',
+ updated_since='fake-time',
+ deleted='fake-deleted')
+
+ def test_service_get_all(self):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of services.
+ # Manager should turn these into a single list of services.
+ for i in xrange(3):
+ cell_name = 'path!to!cell%i' % i
+ services = []
+ for service in FAKE_SERVICES:
+ services.append(copy.deepcopy(service))
+ expected_service = copy.deepcopy(service)
+ cells_utils.add_cell_to_service(expected_service, cell_name)
+ expected_response.append(expected_service)
+ response = messaging.Response(cell_name, services, False)
+ responses.append(response)
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'service_get_all')
+ self.msg_runner.service_get_all(self.ctxt,
+ 'fake-filters').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.service_get_all(self.ctxt,
+ filters='fake-filters')
+ self.assertEqual(expected_response, response)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'service_get_by_compute_host')
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
+ False)
+ expected_response = copy.deepcopy(FAKE_SERVICES[0])
+ cells_utils.add_cell_to_service(expected_response, fake_cell)
+
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ self.msg_runner.service_get_by_compute_host(self.ctxt,
+ fake_cell, 'fake-host').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.service_get_by_compute_host(self.ctxt,
+ host_name=cell_and_host)
+ self.assertEqual(expected_response, response)
+
+ def test_get_host_uptime(self):
+ fake_cell = 'parent!fake-cell'
+ fake_host = 'fake-host'
+ fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
+ host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+ fake_response = messaging.Response(fake_cell, host_uptime, False)
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'get_host_uptime')
+ self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
+ AndReturn(fake_response)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.get_host_uptime(self.ctxt,
+ fake_cell_and_host)
+ self.assertEqual(host_uptime, response)
+
+ def test_service_update(self):
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(
+ fake_cell, FAKE_SERVICES[0], False)
+ expected_response = copy.deepcopy(FAKE_SERVICES[0])
+ cells_utils.add_cell_to_service(expected_response, fake_cell)
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ params_to_update = {'disabled': True}
+
+ self.mox.StubOutWithMock(self.msg_runner, 'service_update')
+ self.msg_runner.service_update(self.ctxt,
+ fake_cell, 'fake-host', 'nova-api',
+ params_to_update).AndReturn(fake_response)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.service_update(
+ self.ctxt, host_name=cell_and_host, binary='nova-api',
+ params_to_update=params_to_update)
+ self.assertEqual(expected_response, response)
+
+ def test_service_delete(self):
+ fake_cell = 'fake-cell'
+ service_id = '1'
+ cell_service_id = cells_utils.cell_with_item(fake_cell, service_id)
+
+ with mock.patch.object(self.msg_runner,
+ 'service_delete') as service_delete:
+ self.cells_manager.service_delete(self.ctxt, cell_service_id)
+ service_delete.assert_called_once_with(
+ self.ctxt, fake_cell, service_id)
+
+ def test_proxy_rpc_to_manager(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'proxy_rpc_to_manager')
+ fake_response = self._get_fake_response()
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ topic = "%s.%s" % (CONF.compute_topic, cell_and_host)
+ self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
+ 'fake-host', topic, 'fake-rpc-msg',
+ True, -1).AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
+ topic=topic, rpc_message='fake-rpc-msg', call=True,
+ timeout=-1)
+ self.assertEqual('fake-response', response)
+
+ def _build_task_log_responses(self, num):
+ responses = []
+ expected_response = []
+ # num cells... so num responses. Each response is a list of task log
+ # entries. Manager should turn these into a single list of
+ # task log entries.
+ for i in xrange(num):
+ cell_name = 'path!to!cell%i' % i
+ task_logs = []
+ for task_log in FAKE_TASK_LOGS:
+ task_logs.append(copy.deepcopy(task_log))
+ expected_task_log = copy.deepcopy(task_log)
+ cells_utils.add_cell_to_task_log(expected_task_log,
+ cell_name)
+ expected_response.append(expected_task_log)
+ response = messaging.Response(cell_name, task_logs, False)
+ responses.append(response)
+ return expected_response, responses
+
+ def test_task_log_get_all(self):
+ expected_response, responses = self._build_task_log_responses(3)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, None,
+ 'fake-name', 'fake-begin',
+ 'fake-end', host=None, state=None).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_cell_but_no_host_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ # Host filter only has cell name.
+ cell_and_host = 'fake-cell'
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host=None,
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_get_all(self):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of computes.
+ # Manager should turn these into a single list of compute nodes.
+ for i in xrange(3):
+ cell_name = 'path!to!cell%i' % i
+ compute_nodes = []
+ for compute_node in FAKE_COMPUTE_NODES:
+ compute_nodes.append(copy.deepcopy(compute_node))
+ expected_compute_node = copy.deepcopy(compute_node)
+ cells_utils.add_cell_to_compute_node(expected_compute_node,
+ cell_name)
+ expected_response.append(expected_compute_node)
+ response = messaging.Response(cell_name, compute_nodes, False)
+ responses.append(response)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get_all')
+ self.msg_runner.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_stats(self):
+ raw_resp1 = {'key1': 1, 'key2': 2}
+ raw_resp2 = {'key2': 1, 'key3': 2}
+ raw_resp3 = {'key3': 1, 'key4': 2}
+ responses = [messaging.Response('cell1', raw_resp1, False),
+ messaging.Response('cell2', raw_resp2, False),
+ messaging.Response('cell2', raw_resp3, False)]
+ expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_stats')
+ self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_stats(self.ctxt)
+ self.assertEqual(expected_resp, response)
+
+ def test_compute_node_get(self):
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(fake_cell,
+ FAKE_COMPUTE_NODES[0],
+ False)
+ expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
+ cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
+ cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get')
+ self.msg_runner.compute_node_get(self.ctxt,
+ 'fake-cell', 'fake-id').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get(self.ctxt,
+ compute_id=cell_and_id)
+ self.assertEqual(expected_response, response)
+
+ def test_actions_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+ fake_response = messaging.Response('fake-cell', [fake_act], False)
+ expected_response = [fake_act]
+ self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
+ self.msg_runner.actions_get(self.ctxt, 'fake-cell',
+ 'fake-uuid').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
+ 'fake-uuid')
+ self.assertEqual(expected_response, response)
+
+ def test_action_get_by_request_id(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+ fake_response = messaging.Response('fake-cell', fake_act, False)
+ expected_response = fake_act
+ self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
+ self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
+ 'fake-uuid', 'req-fake').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.action_get_by_request_id(self.ctxt,
+ 'fake-cell',
+ 'fake-uuid',
+ 'req-fake')
+ self.assertEqual(expected_response, response)
+
+ def test_action_events_get(self):
+ fake_action_id = fake_server_actions.FAKE_ACTION_ID1
+ fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
+ fake_response = messaging.Response('fake-cell', fake_events, False)
+ expected_response = fake_events
+ self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
+ self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
+ 'fake-action').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
+ 'fake-action')
+ self.assertEqual(expected_response, response)
+
+ def test_consoleauth_delete_tokens(self):
+ instance_uuid = 'fake-instance-uuid'
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'consoleauth_delete_tokens')
+ self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
+ self.mox.ReplayAll()
+ self.cells_manager.consoleauth_delete_tokens(self.ctxt,
+ instance_uuid=instance_uuid)
+
+ def test_get_capacities(self):
+ cell_name = 'cell_name'
+ response = {"ram_free":
+ {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
+ self.mox.StubOutWithMock(self.state_manager,
+ 'get_capacities')
+ self.state_manager.get_capacities(cell_name).AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.get_capacities(self.ctxt, cell_name))
+
+ def test_validate_console_port(self):
+ instance_uuid = 'fake-instance-uuid'
+ cell_name = 'fake-cell-name'
+ instance = {'cell_name': cell_name}
+ console_port = 'fake-console-port'
+ console_type = 'fake-console-type'
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'validate_console_port')
+ self.mox.StubOutWithMock(self.cells_manager.db,
+ 'instance_get_by_uuid')
+ fake_response = self._get_fake_response()
+
+ self.cells_manager.db.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn(instance)
+ self.msg_runner.validate_console_port(self.ctxt, cell_name,
+ instance_uuid, console_port,
+ console_type).AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.validate_console_port(self.ctxt,
+ instance_uuid=instance_uuid, console_port=console_port,
+ console_type=console_type)
+ self.assertEqual('fake-response', response)
+
+ def test_bdm_update_or_create_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'bdm_update_or_create_at_top')
+ self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ 'fake-bdm',
+ create='foo')
+ self.mox.ReplayAll()
+ self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
+ 'fake-bdm',
+ create='foo')
+
+ def test_bdm_destroy_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
+ self.msg_runner.bdm_destroy_at_top(self.ctxt,
+ 'fake_instance_uuid',
+ device_name='fake_device_name',
+ volume_id='fake_volume_id')
+
+ self.mox.ReplayAll()
+ self.cells_manager.bdm_destroy_at_top(self.ctxt,
+ 'fake_instance_uuid',
+ device_name='fake_device_name',
+ volume_id='fake_volume_id')
+
+ def test_get_migrations(self):
+ filters = {'status': 'confirmed'}
+ cell1_migrations = [{'id': 123}]
+ cell2_migrations = [{'id': 456}]
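+        # The manager should flatten the per-cell responses into a single
+        # list of migrations.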
+ fake_responses = [self._get_fake_response(cell1_migrations),
+ self._get_fake_response(cell2_migrations)]
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'get_migrations')
+ self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
+ AndReturn(fake_responses)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.get_migrations(self.ctxt, filters)
+
+ self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
+
+ def test_get_migrations_for_a_given_cell(self):
+ filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
+ target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
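+        # A 'cell_name' filter means only that child cell should be targeted,
+        # using the fully-qualified '<our name>!<cell_name>' path.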
+ migrations = [{'id': 123}]
+ fake_responses = [self._get_fake_response(migrations)]
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'get_migrations')
+ self.msg_runner.get_migrations(self.ctxt, target_cell, False,
+ filters).AndReturn(fake_responses)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.get_migrations(self.ctxt, filters)
+ self.assertEqual(migrations, response)
+
+ def test_instance_update_from_api(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_update_from_api')
+ self.msg_runner.instance_update_from_api(self.ctxt,
+ 'fake-instance',
+ 'exp_vm', 'exp_task',
+ 'admin_reset')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_update_from_api(
+ self.ctxt, instance='fake-instance',
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset='admin_reset')
+
+ def test_start_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
+ self.msg_runner.start_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
+
+ def test_stop_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
+ self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
+ do_cast='meow')
+ self.mox.ReplayAll()
+ self.cells_manager.stop_instance(self.ctxt,
+ instance='fake-instance',
+ do_cast='meow')
+
+ def test_cell_create(self):
+ values = 'values'
+ response = 'created_cell'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_create')
+ self.state_manager.cell_create(self.ctxt, values).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_create(self.ctxt, values))
+
+ def test_cell_update(self):
+ cell_name = 'cell_name'
+ values = 'values'
+ response = 'updated_cell'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_update')
+ self.state_manager.cell_update(self.ctxt, cell_name, values).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_update(self.ctxt, cell_name,
+ values))
+
+ def test_cell_delete(self):
+ cell_name = 'cell_name'
+ response = 1
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_delete')
+ self.state_manager.cell_delete(self.ctxt, cell_name).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_delete(self.ctxt, cell_name))
+
+ def test_cell_get(self):
+ cell_name = 'cell_name'
+ response = 'cell_info'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_get')
+ self.state_manager.cell_get(self.ctxt, cell_name).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_get(self.ctxt, cell_name))
+
+ def test_reboot_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
+ self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
+ 'HARD')
+ self.mox.ReplayAll()
+ self.cells_manager.reboot_instance(self.ctxt,
+ instance='fake-instance',
+ reboot_type='HARD')
+
+ def test_suspend_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
+ self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.suspend_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_resume_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
+ self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.resume_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_terminate_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
+ self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.terminate_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_soft_delete_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
+ self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.soft_delete_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_resize_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
+ self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
+ 'fake-flavor', 'fake-updates')
+ self.mox.ReplayAll()
+ self.cells_manager.resize_instance(
+ self.ctxt, instance='fake-instance', flavor='fake-flavor',
+ extra_instance_updates='fake-updates')
+
+ def test_live_migrate_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
+ self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
+ 'fake-block', 'fake-commit',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.cells_manager.live_migrate_instance(
+ self.ctxt, instance='fake-instance',
+ block_migration='fake-block', disk_over_commit='fake-commit',
+ host_name='fake-host')
+
+ def test_revert_resize(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
+ self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
+
+ def test_confirm_resize(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
+ self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
+
+ def test_reset_network(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
+ self.msg_runner.reset_network(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
+
+ def test_inject_network_info(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
+ self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.inject_network_info(self.ctxt,
+ instance='fake-instance')
+
+ def test_snapshot_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
+ self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
+ 'fake-id')
+ self.mox.ReplayAll()
+ self.cells_manager.snapshot_instance(self.ctxt,
+ instance='fake-instance',
+ image_id='fake-id')
+
+ def test_backup_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
+ self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
+ 'fake-id', 'backup-type',
+ 'rotation')
+ self.mox.ReplayAll()
+ self.cells_manager.backup_instance(self.ctxt,
+ instance='fake-instance',
+ image_id='fake-id',
+ backup_type='backup-type',
+ rotation='rotation')
+
+ def test_set_admin_password(self):
+ with mock.patch.object(self.msg_runner,
+ 'set_admin_password') as set_admin_password:
+ self.cells_manager.set_admin_password(self.ctxt,
+ instance='fake-instance', new_pass='fake-password')
+ set_admin_password.assert_called_once_with(self.ctxt,
+ 'fake-instance', 'fake-password')
diff --git a/nova/tests/unit/cells/test_cells_messaging.py b/nova/tests/unit/cells/test_cells_messaging.py
new file mode 100644
index 0000000000..dc15fd1079
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_messaging.py
@@ -0,0 +1,2129 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Messaging module
+"""
+
+import contextlib
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging as oslo_messaging
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova.compute import delete_types
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.objects import base as objects_base
+from nova.objects import fields as objects_fields
+from nova.openstack.common import uuidutils
+from nova import rpc
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_server_actions
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+
+
+class CellsMessageClassesTestCase(test.TestCase):
+ """Test case for the main Cells Message classes."""
+ def setUp(self):
+ super(CellsMessageClassesTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self.our_name = 'api-cell'
+ self.msg_runner = fakes.get_message_runner(self.our_name)
+ self.state_manager = self.msg_runner.state_manager
+
+ def test_reverse_path(self):
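+        # Routing paths use '!' as the cell separator; _reverse_path should
+        # simply reverse the hop order.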
+ path = 'a!b!c!d'
+ expected = 'd!c!b!a'
+ rev_path = messaging._reverse_path(path)
+ self.assertEqual(rev_path, expected)
+
+ def test_response_cell_name_from_path(self):
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2!cell1')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input))
+
+ def test_response_cell_name_from_path_neighbor_only(self):
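+        # With neighbor_only=True the reversed path is truncated at our
+        # immediate neighbor instead of going all the way back to the source.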
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input,
+ neighbor_only=True))
+
+ def test_targeted_message(self):
+ self.flags(max_hop_count=99, group='cells')
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+        self.assertEqual(target_cell, tgt_message.target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertFalse(tgt_message.need_response)
+ self.assertEqual(self.our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ child_cell = self.state_manager.get_child_cell('child-cell2')
+ self.assertEqual(child_cell, next_hop)
+
+ def test_create_targeted_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ target_cell = 'child-cell1!api-cell'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ tgt_message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+        self.assertEqual(target_cell, tgt_message.target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertTrue(tgt_message.need_response)
+ self.assertEqual(our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
+ self.assertEqual(parent_cell, next_hop)
+
+ def test_targeted_message_when_target_is_cell_state(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_child_cell('child-cell2')
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_targeted_message_when_target_cell_state_is_me(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_my_state()
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_create_broadcast_message(self):
+        self.flags(name='api-cell', max_hop_count=99, group='cells')
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertFalse(bcast_message.need_response)
+ self.assertEqual(self.our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ child_cells = self.state_manager.get_child_cells()
+ self.assertEqual(child_cells, next_hops)
+
+ def test_create_broadcast_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs, direction, need_response=True)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertTrue(bcast_message.need_response)
+ self.assertEqual(our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ parent_cells = msg_runner.state_manager.get_parent_cells()
+ self.assertEqual(parent_cells, next_hops)
+
+ def test_self_targeted_message(self):
+ target_cell = 'api-cell'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_child_targeted_message(self):
+ target_cell = 'api-cell!child-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_child_targeted_message_with_object(self):
+ target_cell = 'api-cell!child-cell1'
+ method = 'our_fake_method'
+ direction = 'down'
+
+ call_info = {}
+
+ class CellsMsgingTestObject(objects_base.NovaObject):
+ """Test object. We just need 1 field in order to test
+ that this gets serialized properly.
+ """
+ fields = {'test': objects_fields.StringField()}
+
+ test_obj = CellsMsgingTestObject()
+ test_obj.test = 'meow'
+
+ method_kwargs = dict(obj=test_obj, arg1=1, arg2=2)
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+ self.assertEqual(3, len(call_info['kwargs']))
+ self.assertEqual(1, call_info['kwargs']['arg1'])
+ self.assertEqual(2, call_info['kwargs']['arg2'])
+ # Verify we get a new object with what we expect.
+ obj = call_info['kwargs']['obj']
+ self.assertIsInstance(obj, CellsMsgingTestObject)
+ self.assertNotEqual(id(test_obj), id(obj))
+ self.assertEqual(test_obj.test, obj.test)
+
+ def test_grandchild_targeted_message(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_grandchild_targeted_message_with_response(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+ return 'our_fake_response'
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+ self.assertFalse(response.failure)
+ self.assertEqual(response.value_or_raise(), 'our_fake_response')
+
+ def test_grandchild_targeted_message_with_error(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('this should be returned')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_grandchild_targeted_message_max_hops(self):
+ self.flags(max_hop_count=2, group='cells')
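+        # Reaching the grandchild needs more hops than max_hop_count allows,
+        # so routing should fail with CellMaxHopCountReached.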
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('should not be reached')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellMaxHopCountReached,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell4'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell2(self):
+ target_cell = 'unknown-cell!child-cell2'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_broadcast_routing(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+        # fakes creates 8 cells (including ourselves).
+ self.assertEqual(len(cells), 8)
+
+ def test_broadcast_routing_up(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ msg_runner = fakes.get_message_runner('grandchild-cell3')
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+ # Paths are reversed, since going 'up'
+ expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
+ 'grandchild-cell3!child-cell3!api-cell'])
+ self.assertEqual(expected, cells)
+
+ def test_broadcast_routing_without_ourselves(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=False)
+ bcast_message.process()
+        # fakes creates 8 cells (including ourselves). So we should see
+ # only 7 here.
+ self.assertEqual(len(cells), 7)
+
+ def test_broadcast_routing_with_response(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_response_max_hops(self):
+ self.flags(max_hop_count=2, group='cells')
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ # Should only get responses from our immediate children (and
+ # ourselves)
+ self.assertEqual(len(responses), 5)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_all_erroring(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_broadcast_routing_with_two_erroring(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method_failing(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+ fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
+ our_fake_method_failing)
+ fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
+ our_fake_method_failing)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ failure_responses = [resp for resp in responses if resp.failure]
+ success_responses = [resp for resp in responses if not resp.failure]
+ self.assertEqual(len(failure_responses), 2)
+ self.assertEqual(len(success_responses), 6)
+
+ for response in success_responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ for response in failure_responses:
+ self.assertIn(response.cell_name, ['api-cell!child-cell2',
+ 'api-cell!child-cell3!grandchild-cell3'])
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+
+class CellsTargetedMethodsTestCase(test.TestCase):
+ """Test case for _TargetedMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+ def setUp(self):
+ super(CellsTargetedMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs('api-cell', 'api-cell!child-cell2')
+
+ def _setup_attrs(self, source_cell, target_cell):
+ self.tgt_cell_name = target_cell
+ self.src_msg_runner = fakes.get_message_runner(source_cell)
+ self.src_state_manager = self.src_msg_runner.state_manager
+ tgt_shortname = target_cell.split('!')[-1]
+ self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
+ self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
+ self.tgt_scheduler = self.tgt_msg_runner.scheduler
+ self.tgt_state_manager = self.tgt_msg_runner.state_manager
+ methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_compute_api = methods_cls.compute_api
+ self.tgt_host_api = methods_cls.host_api
+ self.tgt_db_inst = methods_cls.db
+ self.tgt_c_rpcapi = methods_cls.compute_rpcapi
+
+ def test_build_instances(self):
+ build_inst_kwargs = {'filter_properties': {},
+ 'key1': 'value1',
+ 'key2': 'value2'}
+ self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
+ self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
+ self.mox.ReplayAll()
+ self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
+ build_inst_kwargs)
+
+ def test_run_compute_api_method(self):
+
+ instance_uuid = 'fake_instance_uuid'
+ method_info = {'method': 'backup',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'backup')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn('fake_instance')
+ self.tgt_compute_api.backup(self.ctxt, 'fake_instance', 2, 3,
+ arg1='val1', arg2='val2').AndReturn('fake_result')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def _run_compute_api_method_expects_object(self, tgt_compute_api_function,
+ method_name,
+ expected_attrs=None):
+ # runs compute api methods which expects instance to be an object
+ instance_uuid = 'fake_instance_uuid'
+ method_info = {'method': method_name,
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn('fake_instance')
+
+ def get_instance_mock():
+ # NOTE(comstud): This block of code simulates the following
+ # mox code:
+ #
+ # self.mox.StubOutWithMock(objects, 'Instance',
+ # use_mock_anything=True)
+ # self.mox.StubOutWithMock(objects.Instance,
+ # '_from_db_object')
+ # instance_mock = self.mox.CreateMock(objects.Instance)
+ # objects.Instance().AndReturn(instance_mock)
+ #
+            # Unfortunately, the above code fails on py27 due to an
+            # issue with the Mock object, similar to this one:
+ # https://code.google.com/p/pymox/issues/detail?id=35
+ #
+ class FakeInstance(object):
+ @classmethod
+ def _from_db_object(cls, ctxt, obj, db_obj, **kwargs):
+ pass
+
+ instance_mock = FakeInstance()
+
+ def fake_instance():
+ return instance_mock
+
+ self.stubs.Set(objects, 'Instance', fake_instance)
+ self.mox.StubOutWithMock(instance_mock, '_from_db_object')
+ return instance_mock
+
+ instance = get_instance_mock()
+ instance._from_db_object(self.ctxt,
+ instance,
+ 'fake_instance',
+ expected_attrs=expected_attrs
+ ).AndReturn(instance)
+ tgt_compute_api_function(self.ctxt, instance, 2, 3,
+ arg1='val1', arg2='val2').AndReturn('fake_result')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_run_compute_api_method_expects_obj(self):
+ # Run compute_api start method
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'start')
+ self._run_compute_api_method_expects_object(self.tgt_compute_api.start,
+ 'start')
+
+ def test_run_compute_api_method_expects_obj_with_info_cache(self):
+ # Run compute_api shelve method as it requires info_cache and
+ # metadata to be present in instance object
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'shelve')
+ self._run_compute_api_method_expects_object(
+ self.tgt_compute_api.shelve, 'shelve',
+ expected_attrs=['metadata', 'info_cache'])
+
+ def test_run_compute_api_method_unknown_instance(self):
+ # Unknown instance should send a broadcast up that instance
+ # is gone.
+ instance_uuid = 'fake_instance_uuid'
+ instance = {'uuid': instance_uuid}
+ method_info = {'method': 'reboot',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ 'fake_instance_uuid').AndRaise(
+ exception.InstanceNotFound(instance_id=instance_uuid))
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ self.assertRaises(exception.InstanceNotFound,
+ response.value_or_raise)
+
+ def test_update_capabilities(self):
+ # Route up to API
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capabs = {'cap1': set(['val1', 'val2']),
+ 'cap2': set(['val3'])}
+        # The list(set([])) seems silly, but we can't assume the order
+        # of the list.  This should match the code we're testing, which
+        # checks that a set was converted to a list.
+ expected_capabs = {'cap1': list(set(['val1', 'val2'])),
+ 'cap2': ['val3']}
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capabilities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capabilities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.src_state_manager.get_our_capabilities().AndReturn(capabs)
+ self.tgt_state_manager.update_cell_capabilities('child-cell2',
+ expected_capabs)
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ def test_update_capacities(self):
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capacs = 'fake_capacs'
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capacities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capacities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.src_state_manager.get_our_capacities().AndReturn(capacs)
+ self.tgt_state_manager.update_cell_capacities('child-cell2',
+ capacs)
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ def test_announce_capabilities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
+
+ def test_announce_capacities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capacities(self.ctxt)
+
+ def test_service_get_by_compute_host(self):
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name).AndReturn('fake-service')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.service_get_by_compute_host(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name)
+ result = response.value_or_raise()
+ self.assertEqual('fake-service', result)
+
+ def test_service_update(self):
+ binary = 'nova-compute'
+ fake_service = dict(id=42, host='fake_host', binary='nova-compute',
+ topic='compute')
+ fake_compute = dict(
+ id=7116, service_id=42, host='fake_host', vcpus=0, memory_mb=0,
+ local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0,
+ hypervisor_type=0, hypervisor_version=0, hypervisor_hostname=0,
+ free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0,
+ cpu_info='HAL', disk_available_least=0)
+ params_to_update = {'disabled': True, 'report_count': 13}
+
+ ctxt = context.RequestContext('fake_user', 'fake_project',
+ is_admin=True)
+ # We use the real DB for this test, as it's too hard to reach the
+ # host_api to mock out its DB methods
+ db.service_create(ctxt, fake_service)
+ db.compute_node_create(ctxt, fake_compute)
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.service_update(
+ ctxt, self.tgt_cell_name,
+ 'fake_host', binary, params_to_update)
+ result = response.value_or_raise()
+ result.pop('created_at', None)
+ result.pop('updated_at', None)
+ result.pop('disabled_reason', None)
+ expected_result = dict(
+ deleted=0, deleted_at=None,
+ binary=fake_service['binary'],
+ disabled=True, # We just updated this..
+ report_count=13, # ..and this
+ host='fake_host', id=42,
+ topic='compute')
+ self.assertEqual(expected_result, result)
+
+ def test_service_delete(self):
+ fake_service = dict(id=42, host='fake_host', binary='nova-compute',
+ topic='compute')
+
+ ctxt = self.ctxt.elevated()
+ db.service_create(ctxt, fake_service)
+
+ self.src_msg_runner.service_delete(
+ ctxt, self.tgt_cell_name, fake_service['id'])
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_get, ctxt, fake_service['id'])
+
+ def test_proxy_rpc_to_manager_call(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
+ fake_host_name = 'fake-host-name'
+
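+        # proxy_rpc_to_manager should look up the service for the host, then
+        # build an RPC client for the given topic and call it with the
+        # requested timeout.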
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+
+ target = oslo_messaging.Target(topic='fake-topic')
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpc, 'get_client')
+ rpc.get_client(target).AndReturn(rpcclient)
+ rpcclient.prepare(timeout=5).AndReturn(rpcclient)
+ rpcclient.call(mox.IgnoreArg(),
+ 'fake_rpc_method').AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, True, timeout=5)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_proxy_rpc_to_manager_cast(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+
+ target = oslo_messaging.Target(topic='fake-topic')
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpc, 'get_client')
+ rpc.get_client(target).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method')
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, False, timeout=None)
+
+ def test_task_log_get_all_targeted(self):
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+ self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
+ begin, end, host=host,
+ state=state).AndReturn(['fake_result'])
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.task_log_get_all(self.ctxt,
+ self.tgt_cell_name, task_name, begin, end, host=host,
+ state=state)
+ self.assertIsInstance(response, list)
+ self.assertEqual(1, len(response))
+ result = response[0].value_or_raise()
+ self.assertEqual(['fake_result'], result)
+
+ def test_compute_node_get(self):
+ compute_id = 'fake-id'
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
+ self.tgt_db_inst.compute_node_get(self.ctxt,
+ compute_id).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.compute_node_get(self.ctxt,
+ self.tgt_cell_name, compute_id)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_actions_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get')
+ self.tgt_db_inst.actions_get(self.ctxt,
+ 'fake-uuid').AndReturn([fake_act])
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.actions_get(self.ctxt,
+ self.tgt_cell_name,
+ 'fake-uuid')
+ result = response.value_or_raise()
+ self.assertEqual([jsonutils.to_primitive(fake_act)], result)
+
+ def test_action_get_by_request_id(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id')
+ self.tgt_db_inst.action_get_by_request_id(self.ctxt,
+ 'fake-uuid', 'req-fake').AndReturn(fake_act)
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.action_get_by_request_id(self.ctxt,
+ self.tgt_cell_name, 'fake-uuid', 'req-fake')
+ result = response.value_or_raise()
+ self.assertEqual(jsonutils.to_primitive(fake_act), result)
+
+ def test_action_events_get(self):
+ fake_action_id = fake_server_actions.FAKE_ACTION_ID1
+ fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get')
+ self.tgt_db_inst.action_events_get(self.ctxt,
+ 'fake-action').AndReturn(fake_events)
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.action_events_get(self.ctxt,
+ self.tgt_cell_name,
+ 'fake-action')
+ result = response.value_or_raise()
+ self.assertEqual(jsonutils.to_primitive(fake_events), result)
+
+ def test_validate_console_port(self):
+ instance_uuid = 'fake_instance_uuid'
+ instance = {'uuid': instance_uuid}
+ console_port = 'fake-port'
+ console_type = 'fake-type'
+
+ self.mox.StubOutWithMock(self.tgt_c_rpcapi, 'validate_console_port')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn(instance)
+ self.tgt_c_rpcapi.validate_console_port(self.ctxt,
+ instance, console_port, console_type).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.validate_console_port(self.ctxt,
+ self.tgt_cell_name, instance_uuid, console_port,
+ console_type)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_get_migrations_for_a_given_cell(self):
+ filters = {'cell_name': 'child-cell2', 'status': 'confirmed'}
+ migrations_in_progress = [{'id': 123}]
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ 'get_migrations')
+
+ self.tgt_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_in_progress)
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ self.tgt_cell_name, False, filters)
+ result = responses[0].value_or_raise()
+ self.assertEqual(migrations_in_progress, result)
+
+ def test_get_migrations_for_an_invalid_cell(self):
+ filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'}
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ 'api_cell!invalid_cell', False, filters)
+
+ self.assertEqual(0, len(responses))
+
+ def test_call_compute_api_with_obj(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ self.mox.StubOutWithMock(instance, 'refresh')
+ # Using 'snapshot' for this test, because it
+ # takes args and kwargs.
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot')
+ instance.refresh(self.ctxt)
+ self.tgt_compute_api.snapshot(
+ self.ctxt, instance, 'name',
+ extra_properties='props').AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.tgt_methods_cls._call_compute_api_with_obj(
+ self.ctxt, instance, 'snapshot', 'name',
+ extra_properties='props')
+ self.assertEqual('foo', result)
+
+ def test_call_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
+ with mock.patch.object(instance, 'refresh', side_effect=error):
+ self.assertRaises(exception.InstanceInfoCacheNotFound,
+ self.tgt_methods_cls._call_compute_api_with_obj,
+ self.ctxt, instance, 'snapshot')
+
+ def test_call_delete_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
+ with contextlib.nested(
+ mock.patch.object(instance, 'refresh',
+ side_effect=error),
+ mock.patch.object(self.tgt_compute_api, 'delete')) as (inst,
+ delete):
+ self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt,
+ instance,
+ 'delete')
+ delete.assert_called_once_with(self.ctxt, instance)
+
+ def test_call_compute_with_obj_unknown_instance(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = None
+ self.mox.StubOutWithMock(instance, 'refresh')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ instance.refresh(self.ctxt).AndRaise(
+ exception.InstanceNotFound(instance_id=instance.uuid))
+
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt,
+ {'uuid': instance.uuid})
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstanceNotFound,
+ self.tgt_methods_cls._call_compute_api_with_obj,
+ self.ctxt, instance, 'snapshot', 'name')
+
+ def _instance_update_helper(self, admin_state_reset):
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+
+ instance = objects.Instance()
+ instance.cell_name = self.tgt_cell_name
+ instance.obj_reset_changes()
+ instance.task_state = 'meow'
+ instance.vm_state = 'wuff'
+ instance.user_data = 'foo'
+ instance.metadata = {'meta': 'data'}
+ instance.system_metadata = {'system': 'metadata'}
+ self.assertEqual(set(['user_data', 'vm_state', 'task_state',
+ 'metadata', 'system_metadata']),
+ instance.obj_what_changed())
+
+ self.mox.StubOutWithMock(instance, 'save')
+
+ def _check_object(*args, **kwargs):
+ # task_state and vm_state changes should have been cleared
+ # before calling save()
+ if admin_state_reset:
+ self.assertEqual(
+ set(['user_data', 'vm_state', 'task_state']),
+ instance.obj_what_changed())
+ else:
+ self.assertEqual(set(['user_data']),
+ instance.obj_what_changed())
+
+ instance.save(self.ctxt, expected_task_state='exp_task',
+ expected_vm_state='exp_vm').WithSideEffects(
+ _check_object)
+
+ self.mox.ReplayAll()
+
+ self.tgt_methods_cls.instance_update_from_api(
+ message,
+ instance,
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset=admin_state_reset)
+
+ def test_instance_update_from_api(self):
+ self._instance_update_helper(False)
+
+ def test_instance_update_from_api_admin_state_reset(self):
+ self._instance_update_helper(True)
+
+ def _test_instance_action_method(self, method, args, kwargs,
+ expected_args, expected_kwargs,
+ expect_result):
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = expect_result
+
+ meth_cls = self.tgt_methods_cls
+ self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj')
+
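+        # Some cells-level names differ from the compute_api method they
+        # ultimately call; 'terminate' maps to compute_api.delete().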
+ method_corrections = {
+ 'terminate': 'delete',
+ }
+ api_method = method_corrections.get(method, method)
+
+ meth_cls._call_compute_api_with_obj(
+ self.ctxt, 'fake-instance', api_method,
+ *expected_args, **expected_kwargs).AndReturn('meow')
+
+ self.mox.ReplayAll()
+
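+        # Most actions live on the targeted methods class as
+        # '<method>_instance'; the names below are used as-is.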
+ method_translations = {'revert_resize': 'revert_resize',
+ 'confirm_resize': 'confirm_resize',
+ 'reset_network': 'reset_network',
+ 'inject_network_info': 'inject_network_info',
+ 'set_admin_password': 'set_admin_password',
+ }
+ tgt_method = method_translations.get(method,
+ '%s_instance' % method)
+ result = getattr(meth_cls, tgt_method)(
+ message, 'fake-instance', *args, **kwargs)
+ if expect_result:
+ self.assertEqual('meow', result)
+
+ def test_start_instance(self):
+ self._test_instance_action_method('start', (), {}, (), {}, False)
+
+ def test_stop_instance_cast(self):
+ self._test_instance_action_method('stop', (), {}, (),
+ {'do_cast': True}, False)
+
+ def test_stop_instance_call(self):
+ self._test_instance_action_method('stop', (), {}, (),
+ {'do_cast': False}, True)
+
+ def test_reboot_instance(self):
+ kwargs = dict(reboot_type='HARD')
+ self._test_instance_action_method('reboot', (), kwargs, (),
+ kwargs, False)
+
+ def test_suspend_instance(self):
+ self._test_instance_action_method('suspend', (), {}, (), {}, False)
+
+ def test_resume_instance(self):
+ self._test_instance_action_method('resume', (), {}, (), {}, False)
+
+ def test_get_host_uptime(self):
+ host_name = "fake-host"
+ host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+ self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime')
+ self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\
+ AndReturn(host_uptime)
+ self.mox.ReplayAll()
+ response = self.src_msg_runner.get_host_uptime(self.ctxt,
+ self.tgt_cell_name,
+ host_name)
+        result = response.value_or_raise()
+        self.assertEqual(host_uptime, result)
+
+ def test_terminate_instance(self):
+ self._test_instance_action_method('terminate',
+ (), {}, (), {}, False)
+
+ def test_soft_delete_instance(self):
+ self._test_instance_action_method(delete_types.SOFT_DELETE,
+ (), {}, (), {}, False)
+
+ def test_pause_instance(self):
+ self._test_instance_action_method('pause', (), {}, (), {}, False)
+
+ def test_unpause_instance(self):
+ self._test_instance_action_method('unpause', (), {}, (), {}, False)
+
+ def test_resize_instance(self):
+ kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
+ extra_instance_updates=dict(cow='moo'))
+ expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
+ self._test_instance_action_method('resize', (), kwargs,
+ (), expected_kwargs,
+ False)
+
+ def test_live_migrate_instance(self):
+ kwargs = dict(block_migration='fake-block-mig',
+ disk_over_commit='fake-commit',
+ host_name='fake-host')
+ expected_args = ('fake-block-mig', 'fake-commit', 'fake-host')
+ self._test_instance_action_method('live_migrate', (), kwargs,
+ expected_args, {}, False)
+
+ def test_revert_resize(self):
+ self._test_instance_action_method('revert_resize',
+ (), {}, (), {}, False)
+
+ def test_confirm_resize(self):
+ self._test_instance_action_method('confirm_resize',
+ (), {}, (), {}, False)
+
+ def test_reset_network(self):
+ self._test_instance_action_method('reset_network',
+ (), {}, (), {}, False)
+
+ def test_inject_network_info(self):
+ self._test_instance_action_method('inject_network_info',
+ (), {}, (), {}, False)
+
+ def test_snapshot_instance(self):
+ inst = objects.Instance()
+ meth_cls = self.tgt_methods_cls
+
+ self.mox.StubOutWithMock(inst, 'refresh')
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance')
+
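+        # snapshot_instance should set the task state to
+        # IMAGE_SNAPSHOT_PENDING before saving and casting to compute.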
+ def check_state(expected_task_state=None):
+ self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING,
+ inst.task_state)
+
+ inst.refresh()
+ inst.save(expected_task_state=[None]).WithSideEffects(check_state)
+
+ meth_cls.compute_rpcapi.snapshot_instance(self.ctxt,
+ inst, 'image-id')
+
+ self.mox.ReplayAll()
+
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = False
+
+ meth_cls.snapshot_instance(message, inst, image_id='image-id')
+
+ def test_backup_instance(self):
+ inst = objects.Instance()
+ meth_cls = self.tgt_methods_cls
+
+ self.mox.StubOutWithMock(inst, 'refresh')
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance')
+
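+        # backup_instance should set the task state to IMAGE_BACKUP before
+        # saving and casting to compute.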
+ def check_state(expected_task_state=None):
+ self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state)
+
+ inst.refresh()
+ inst.save(expected_task_state=[None]).WithSideEffects(check_state)
+
+ meth_cls.compute_rpcapi.backup_instance(self.ctxt,
+ inst,
+ 'image-id',
+ 'backup-type',
+ 'rotation')
+
+ self.mox.ReplayAll()
+
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = False
+
+ meth_cls.backup_instance(message, inst,
+ image_id='image-id',
+ backup_type='backup-type',
+ rotation='rotation')
+
+ def test_set_admin_password(self):
+ args = ['fake-password']
+ self._test_instance_action_method('set_admin_password', args, {}, args,
+ {}, False)
+
+
+class CellsBroadcastMethodsTestCase(test.TestCase):
+ """Test case for _BroadcastMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+
+ def setUp(self):
+ super(CellsBroadcastMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs()
+
+ def _setup_attrs(self, up=True):
+ mid_cell = 'child-cell2'
+ if up:
+ src_cell = 'grandchild-cell1'
+ tgt_cell = 'api-cell'
+ else:
+ src_cell = 'api-cell'
+ tgt_cell = 'grandchild-cell1'
+
+ self.src_msg_runner = fakes.get_message_runner(src_cell)
+ methods_cls = self.src_msg_runner.methods_by_type['broadcast']
+ self.src_methods_cls = methods_cls
+ self.src_db_inst = methods_cls.db
+ self.src_compute_api = methods_cls.compute_api
+ self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ if not up:
+ # fudge things so we only have 1 child to broadcast to
+ state_manager = self.src_msg_runner.state_manager
+ for cell in state_manager.get_child_cells():
+ if cell.name != 'child-cell2':
+ del state_manager.child_cells[cell.name]
+
+ self.mid_msg_runner = fakes.get_message_runner(mid_cell)
+ methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
+ self.mid_methods_cls = methods_cls
+ self.mid_db_inst = methods_cls.db
+ self.mid_compute_api = methods_cls.compute_api
+ self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
+ methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_db_inst = methods_cls.db
+ self.tgt_compute_api = methods_cls.compute_api
+ self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ def test_at_the_top(self):
+ self.assertTrue(self.tgt_methods_cls._at_the_top())
+ self.assertFalse(self.mid_methods_cls._at_the_top())
+ self.assertFalse(self.src_methods_cls._at_the_top())
+
+ def test_apply_expected_states_building(self):
+ instance_info = {'vm_state': vm_states.BUILDING}
+ expected = dict(instance_info,
+ expected_vm_state=[vm_states.BUILDING, None])
+ self.src_methods_cls._apply_expected_states(instance_info)
+ self.assertEqual(expected, instance_info)
+
+ def test_apply_expected_states_resize_finish(self):
+ instance_info = {'task_state': task_states.RESIZE_FINISH}
+ exp_states = [task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP]
+ expected = dict(instance_info, expected_task_state=exp_states)
+ self.src_methods_cls._apply_expected_states(instance_info)
+ self.assertEqual(expected, instance_info)
+
+ def _test_instance_update_at_top(self, net_info, exists=True):
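+        # Check that an update sent from the bottom cell is only written to
+        # the database in the top (api) cell, with system_metadata flattened
+        # to a dict, info_cache reduced to its network_info, and cell_name
+        # rewritten to the full cell path.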
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'network_info': net_info}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'uuid': 'fake_uuid',
+ 'security_groups': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+ expected_sys_metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ expected_info_cache = {'network_info': "[]"}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
+ expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
+ 'other': 'meow',
+ 'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'instance_info_cache_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'instance_info_cache_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_create')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'instance_info_cache_update')
+ mock = self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
+ expected_instance,
+ update_cells=False)
+ if not exists:
+ mock.AndRaise(exception.InstanceNotFound(instance_id='fake_uuid'))
+ self.tgt_db_inst.instance_create(self.ctxt,
+ expected_instance)
+ self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
+ expected_info_cache)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
+
+ def test_instance_update_at_top(self):
+ self._test_instance_update_at_top("[]")
+
+ def test_instance_update_at_top_netinfo_list(self):
+ self._test_instance_update_at_top([])
+
+ def test_instance_update_at_top_netinfo_model(self):
+ self._test_instance_update_at_top(network_model.NetworkInfo())
+
+ def test_instance_update_at_top_does_not_already_exist(self):
+ self._test_instance_update_at_top([], exists=False)
+
+ def test_instance_update_at_top_with_building_state(self):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'uuid': 'fake_uuid',
+ 'security_groups': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'vm_state': vm_states.BUILDING,
+ 'other': 'meow'}
+ expected_sys_metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ expected_info_cache = {'other': 'moo'}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
+ expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
+ 'other': 'meow',
+ 'vm_state': vm_states.BUILDING,
+ 'expected_vm_state': [vm_states.BUILDING, None],
+ 'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'instance_info_cache_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'instance_info_cache_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'instance_info_cache_update')
+ self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
+ expected_instance,
+ update_cells=False)
+ self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
+ expected_info_cache)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
+ self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
+ update_cells=False)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
+
+ def test_instance_hard_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api, delete_types.DELETE)
+
+ self.mox.StubOutWithMock(self.mid_compute_api, delete_types.DELETE)
+ self.mox.StubOutWithMock(self.tgt_compute_api, delete_types.DELETE)
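+        # delete_types.DELETE doubles as the compute_api method name, so it
+        # is both the attribute stubbed above and the delete type passed to
+        # instance_delete_everywhere() below.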
+
+ self.mid_compute_api.delete(self.ctxt, instance)
+ self.tgt_compute_api.delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, delete_types.DELETE)
+
+ def test_instance_soft_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api,
+ delete_types.SOFT_DELETE)
+
+ self.mox.StubOutWithMock(self.mid_compute_api,
+ delete_types.SOFT_DELETE)
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ delete_types.SOFT_DELETE)
+
+ self.mid_compute_api.soft_delete(self.ctxt, instance)
+ self.tgt_compute_api.soft_delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, delete_types.SOFT_DELETE)
+
+ def test_instance_fault_create_at_top(self):
+ fake_instance_fault = {'id': 1,
+ 'message': 'fake-message',
+ 'details': 'fake-details'}
+
+ if_mock = mock.Mock(spec_set=objects.InstanceFault)
+
+ def _check_create():
+ self.assertEqual('fake-message', if_mock.message)
+ self.assertEqual('fake-details', if_mock.details)
+ # Should not be set
+ self.assertNotEqual(1, if_mock.id)
+
+ if_mock.create.side_effect = _check_create
+
+ with mock.patch.object(objects, 'InstanceFault') as if_obj_mock:
+ if_obj_mock.return_value = if_mock
+ self.src_msg_runner.instance_fault_create_at_top(
+ self.ctxt, fake_instance_fault)
+
+ if_obj_mock.assert_called_once_with(context=self.ctxt)
+ if_mock.create.assert_called_once_with()
+
+ def test_bw_usage_update_at_top(self):
+ fake_bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_last_ctr_in',
+ 'last_ctr_out': 'fake_last_ctr_out',
+ 'last_refreshed': 'fake_last_refreshed'}
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
+ self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
+ fake_bw_update_info)
+
+ def test_sync_instances(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ project_id = 'fake_project_id'
+ updated_since_raw = 'fake_updated_since_raw'
+ updated_since_parsed = 'fake_updated_since_parsed'
+ deleted = 'fake_deleted'
+
+ instance1 = dict(uuid='fake_uuid1', deleted=False)
+ instance2 = dict(uuid='fake_uuid2', deleted=True)
+ fake_instances = [instance1, instance2]
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.mox.StubOutWithMock(timeutils, 'parse_isotime')
+ self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
+
+ # Middle cell.
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn([])
+
+ # Bottom/Target cell
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn(fake_instances)
+ self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.sync_instances(self.ctxt,
+ project_id, updated_since_raw, deleted)
+
+ def test_service_get_all_with_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters={})
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_service_get_all_without_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ disabled = False
+ filters = {'disabled': disabled}
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters=filters)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_task_log_get_all_broadcast(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+
+ self.src_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([1, 2])
+ self.mid_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([3])
+ self.tgt_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.task_log_get_all(ctxt, None,
+ task_name, begin, end, host=host, state=state)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
+
+ self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all_with_hyp_match(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ hypervisor_match = 'meow'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_search_by_hypervisor')
+
+ self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([3])
+ self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt,
+ hypervisor_match=hypervisor_match)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_stats(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_statistics')
+
+ self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_stats(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_consoleauth_delete_tokens(self):
+ fake_uuid = 'fake-instance-uuid'
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_ca_rpcapi,
+ 'delete_tokens_for_instance')
+ self.mox.StubOutWithMock(self.mid_ca_rpcapi,
+ 'delete_tokens_for_instance')
+
+ self.mox.StubOutWithMock(self.tgt_ca_rpcapi,
+ 'delete_tokens_for_instance')
+ self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid)
+
+ def test_bdm_update_or_create_with_none_create(self):
+ fake_bdm = {'id': 'fake_id',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update_or_create')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update_or_create')
+ self.tgt_db_inst.block_device_mapping_update_or_create(
+ self.ctxt, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=None)
+
+ def test_bdm_update_or_create_with_true_create(self):
+ fake_bdm = {'id': 'fake_id',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_create')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_create')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_create')
+ self.tgt_db_inst.block_device_mapping_create(
+ self.ctxt, fake_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=True)
+
+ def test_bdm_update_or_create_with_false_create_vol_id(self):
+ fake_bdm = {'id': 'fake_id',
+ 'instance_uuid': 'fake_instance_uuid',
+ 'device_name': 'fake_device_name',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ fake_inst_bdms = [{'id': 1,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'},
+ {'id': 2,
+ 'volume_id': 'fake_volume_id',
+ 'device_name': 'not-a-match'},
+ {'id': 3,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'}]
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update')
+
+ self.tgt_db_inst.block_device_mapping_get_all_by_instance(
+ self.ctxt, 'fake_instance_uuid').AndReturn(
+ fake_inst_bdms)
+ # Should try to update ID 2.
+ self.tgt_db_inst.block_device_mapping_update(
+ self.ctxt, 2, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=False)
+
+ def test_bdm_update_or_create_with_false_create_dev_name(self):
+ fake_bdm = {'id': 'fake_id',
+ 'instance_uuid': 'fake_instance_uuid',
+ 'device_name': 'fake_device_name',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ fake_inst_bdms = [{'id': 1,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'},
+ {'id': 2,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'fake_device_name'},
+ {'id': 3,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'}]
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update')
+
+ self.tgt_db_inst.block_device_mapping_get_all_by_instance(
+ self.ctxt, 'fake_instance_uuid').AndReturn(
+ fake_inst_bdms)
+ # Should try to update ID 2.
+ self.tgt_db_inst.block_device_mapping_update(
+ self.ctxt, 2, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=False)
+
+ def test_bdm_destroy_by_volume(self):
+ fake_instance_uuid = 'fake-instance-uuid'
+        fake_volume_id = 'fake-volume-id'
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+ self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume(
+ self.ctxt, fake_instance_uuid, fake_volume_id)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
+ volume_id=fake_volume_id)
+
+ def test_bdm_destroy_by_device(self):
+ fake_instance_uuid = 'fake-instance-uuid'
+ fake_device_name = 'fake-device-name'
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+ self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device(
+ self.ctxt, fake_instance_uuid, fake_device_name)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
+ device_name=fake_device_name)
+
+ def test_get_migrations(self):
+ self._setup_attrs(up=False)
+ filters = {'status': 'confirmed'}
+ migrations_from_cell1 = [{'id': 123}]
+ migrations_from_cell2 = [{'id': 456}]
+ self.mox.StubOutWithMock(self.mid_compute_api,
+ 'get_migrations')
+
+ self.mid_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_from_cell1)
+
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ 'get_migrations')
+
+ self.tgt_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_from_cell2)
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ None, False, filters)
+ self.assertEqual(2, len(responses))
+ for response in responses:
+ self.assertIn(response.value_or_raise(), [migrations_from_cell1,
+ migrations_from_cell2])
diff --git a/nova/tests/unit/cells/test_cells_rpc_driver.py b/nova/tests/unit/cells/test_cells_rpc_driver.py
new file mode 100644
index 0000000000..7efba3765b
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_rpc_driver.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPC Communication Driver
+"""
+
+import mox
+from oslo.config import cfg
+from oslo import messaging as oslo_messaging
+
+from nova.cells import messaging
+from nova.cells import rpc_driver
+from nova import context
+from nova import rpc
+from nova import test
+from nova.tests.unit.cells import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
+ group='cells')
+
+
+class CellsRPCDriverTestCase(test.NoDBTestCase):
+ """Test case for Cells communication via RPC."""
+
+ def setUp(self):
+ super(CellsRPCDriverTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self.driver = rpc_driver.CellsRPCDriver()
+
+ def test_start_servers(self):
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ fake_msg_runner = fakes.get_message_runner('api-cell')
+
+ class FakeInterCellRPCDispatcher(object):
+ def __init__(_self, msg_runner):
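+                # '_self' is used so that 'self' still refers to the
+                # test case, letting us assert on the msg_runner the
+                # driver passes to the dispatcher.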
+ self.assertEqual(fake_msg_runner, msg_runner)
+
+ self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
+ FakeInterCellRPCDispatcher)
+ self.mox.StubOutWithMock(rpc, 'get_server')
+
+ for message_type in messaging.MessageRunner.get_message_types():
+ topic = 'cells.intercell42.' + message_type
+ target = oslo_messaging.Target(topic=topic, server=CONF.host)
+ endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
+
+ rpcserver = self.mox.CreateMockAnything()
+ rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
+ rpcserver.start()
+
+ self.mox.ReplayAll()
+
+ self.driver.start_servers(fake_msg_runner)
+
+ def test_stop_servers(self):
+ call_info = {'stopped': []}
+
+ class FakeRPCServer(object):
+ def stop(self):
+ call_info['stopped'].append(self)
+
+ fake_servers = [FakeRPCServer() for x in xrange(5)]
+ self.driver.rpc_servers = fake_servers
+ self.driver.stop_servers()
+ self.assertEqual(fake_servers, call_info['stopped'])
+
+ def test_send_message_to_cell_cast(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
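+        # The driver should hand _get_client() the state of the target
+        # cell; match it by the transport URL built from the connection
+        # info above.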
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell.targeted').AndReturn(rpcclient)
+
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_send_message_to_cell_fanout_cast(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell.targeted').AndReturn(rpcclient)
+
+ rpcclient.prepare(fanout=True).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_rpc_topic_uses_message_type(self):
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', fanout=True)
+ message.message_type = 'fake-message-type'
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell42.fake-message-type').AndReturn(rpcclient)
+
+ rpcclient.prepare(fanout=True).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_process_message(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', fanout=True)
+
+ call_info = {}
+
+ def _fake_message_from_json(json_message):
+ call_info['json_message'] = json_message
+ self.assertEqual(message.to_json(), json_message)
+ return message
+
+ def _fake_process():
+ call_info['process_called'] = True
+
+ self.stubs.Set(msg_runner, 'message_from_json',
+ _fake_message_from_json)
+ self.stubs.Set(message, 'process', _fake_process)
+
+ dispatcher.process_message(self.ctxt, message.to_json())
+ self.assertEqual(message.to_json(), call_info['json_message'])
+ self.assertTrue(call_info['process_called'])
diff --git a/nova/tests/unit/cells/test_cells_rpcapi.py b/nova/tests/unit/cells/test_cells_rpcapi.py
new file mode 100644
index 0000000000..398b96d8ae
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_rpcapi.py
@@ -0,0 +1,760 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPCAPI
+"""
+
+from oslo.config import cfg
+import six
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+CONF.import_opt('topic', 'nova.cells.opts', group='cells')
+
+
+class CellsAPITestCase(test.NoDBTestCase):
+ """Test case for cells.api interfaces."""
+
+ def setUp(self):
+ super(CellsAPITestCase, self).setUp()
+ self.fake_topic = 'fake_topic'
+ self.fake_context = 'fake_context'
+ self.flags(topic=self.fake_topic, enable=True, group='cells')
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _stub_rpc_method(self, rpc_method, result):
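+        # Replace the RPC client's prepare() and the given cast/call method
+        # with fakes that record the context, method name, kwargs and any
+        # requested version so _check_result() can verify them.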
+ call_info = {}
+
+ orig_prepare = self.cells_rpcapi.client.prepare
+
+ def fake_rpc_prepare(**kwargs):
+ if 'version' in kwargs:
+ call_info['version'] = kwargs.pop('version')
+ return self.cells_rpcapi.client
+
+ def fake_csv(version):
+ return orig_prepare(version).can_send_version()
+
+ def fake_rpc_method(ctxt, method, **kwargs):
+ call_info['context'] = ctxt
+ call_info['method'] = method
+ call_info['args'] = kwargs
+ return result
+
+ self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
+ self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
+ self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
+
+ return call_info
+
+ def _check_result(self, call_info, method, args, version=None):
+ self.assertEqual(self.cells_rpcapi.client.target.topic,
+ self.fake_topic)
+ self.assertEqual(self.fake_context, call_info['context'])
+ self.assertEqual(method, call_info['method'])
+ self.assertEqual(args, call_info['args'])
+ if version is not None:
+ self.assertIn('version', call_info)
+ self.assertIsInstance(call_info['version'], six.string_types,
+ msg="Message version %s is not a string" %
+ call_info['version'])
+ self.assertEqual(version, call_info['version'])
+ else:
+ self.assertNotIn('version', call_info)
+
+ def test_cast_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': False}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.cast_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+
+ def test_call_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+ fake_response = 'fake_response'
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': True}
+
+ call_info = self._stub_rpc_method('call', fake_response)
+
+ result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+ self.assertEqual(fake_response, result)
+
+ def test_build_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.build_instances(
+ self.fake_context, instances=['1', '2'],
+ image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
+
+ expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
+ 'image': {'fake': 'image'},
+ 'arg1': 1,
+ 'arg2': 2,
+ 'arg3': 3}}
+ self._check_result(call_info, 'build_instances',
+ expected_args, version='1.8')
+
+ def test_get_capacities(self):
+ capacity_info = {"capacity": "info"}
+ call_info = self._stub_rpc_method('call',
+ result=capacity_info)
+ result = self.cells_rpcapi.get_capacities(self.fake_context,
+ cell_name="name")
+ self._check_result(call_info, 'get_capacities',
+ {'cell_name': 'name'}, version='1.9')
+ self.assertEqual(capacity_info, result)
+
+ def test_instance_update_at_top(self):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'security_groups': 'fake',
+ 'instance_type': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_update_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_update_at_top',
+ expected_args)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake-uuid'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_destroy_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_destroy_at_top',
+ expected_args)
+
+ def test_instance_delete_everywhere(self):
+ instance = fake_instance.fake_instance_obj(self.fake_context)
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_delete_everywhere(
+ self.fake_context, instance,
+ 'fake-type')
+
+ expected_args = {'instance': instance,
+ 'delete_type': 'fake-type'}
+ self._check_result(call_info, 'instance_delete_everywhere',
+ expected_args, version='1.27')
+
+ def test_instance_fault_create_at_top(self):
+ fake_instance_fault = {'id': 2,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_fault_create_at_top(
+ self.fake_context, fake_instance_fault)
+
+ expected_args = {'instance_fault': fake_instance_fault}
+ self._check_result(call_info, 'instance_fault_create_at_top',
+ expected_args)
+
+ def test_bw_usage_update_at_top(self):
+ update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
+ 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
+ 'fake_ctr_out')
+ update_kwargs = {'last_refreshed': 'fake_refreshed'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bw_usage_update_at_top(
+ self.fake_context, *update_args, **update_kwargs)
+
+ bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_ctr_in',
+ 'last_ctr_out': 'fake_ctr_out',
+ 'last_refreshed': 'fake_refreshed'}
+
+ expected_args = {'bw_update_info': bw_update_info}
+ self._check_result(call_info, 'bw_usage_update_at_top',
+ expected_args)
+
+ def test_get_cell_info_for_neighbors(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_cell_info_for_neighbors(
+ self.fake_context)
+ self._check_result(call_info, 'get_cell_info_for_neighbors', {},
+ version='1.1')
+ self.assertEqual(result, 'fake_response')
+
+ def test_sync_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+ self.cells_rpcapi.sync_instances(self.fake_context,
+ project_id='fake_project', updated_since='fake_time',
+ deleted=True)
+
+ expected_args = {'project_id': 'fake_project',
+ 'updated_since': 'fake_time',
+ 'deleted': True}
+ self._check_result(call_info, 'sync_instances', expected_args,
+ version='1.1')
+
+ def test_service_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ fake_filters = {'key1': 'val1', 'key2': 'val2'}
+ result = self.cells_rpcapi.service_get_all(self.fake_context,
+ filters=fake_filters)
+
+ expected_args = {'filters': fake_filters}
+ self._check_result(call_info, 'service_get_all', expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_get_by_compute_host(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.service_get_by_compute_host(
+ self.fake_context, host_name='fake-host-name')
+ expected_args = {'host_name': 'fake-host-name'}
+ self._check_result(call_info, 'service_get_by_compute_host',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_get_host_uptime(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_host_uptime(
+ self.fake_context, host_name='fake-host-name')
+ expected_args = {'host_name': 'fake-host-name'}
+ self._check_result(call_info, 'get_host_uptime',
+ expected_args,
+ version='1.17')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_update(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.service_update(
+ self.fake_context, host_name='fake-host-name',
+ binary='nova-api', params_to_update={'disabled': True})
+ expected_args = {
+ 'host_name': 'fake-host-name',
+ 'binary': 'nova-api',
+ 'params_to_update': {'disabled': True}}
+ self._check_result(call_info, 'service_update',
+ expected_args,
+ version='1.7')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_delete(self):
+ call_info = self._stub_rpc_method('call', None)
+ cell_service_id = 'cell@id'
+ result = self.cells_rpcapi.service_delete(
+ self.fake_context, cell_service_id=cell_service_id)
+ expected_args = {'cell_service_id': cell_service_id}
+ self._check_result(call_info, 'service_delete',
+ expected_args, version='1.26')
+ self.assertIsNone(result)
+
+ def test_proxy_rpc_to_manager(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.proxy_rpc_to_manager(
+ self.fake_context, rpc_message='fake-msg',
+ topic='fake-topic', call=True, timeout=-1)
+ expected_args = {'rpc_message': 'fake-msg',
+ 'topic': 'fake-topic',
+ 'call': True,
+ 'timeout': -1}
+ self._check_result(call_info, 'proxy_rpc_to_manager',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_task_log_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.task_log_get_all(self.fake_context,
+ task_name='fake_name',
+ period_beginning='fake_begin',
+ period_ending='fake_end',
+ host='fake_host',
+ state='fake_state')
+
+ expected_args = {'task_name': 'fake_name',
+ 'period_beginning': 'fake_begin',
+ 'period_ending': 'fake_end',
+ 'host': 'fake_host',
+ 'state': 'fake_state'}
+ self._check_result(call_info, 'task_log_get_all', expected_args,
+ version='1.3')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
+ hypervisor_match='fake-match')
+
+ expected_args = {'hypervisor_match': 'fake-match'}
+ self._check_result(call_info, 'compute_node_get_all', expected_args,
+ version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_stats(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_stats(self.fake_context)
+ expected_args = {}
+ self._check_result(call_info, 'compute_node_stats',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get(self.fake_context,
+ 'fake_compute_id')
+ expected_args = {'compute_id': 'fake_compute_id'}
+ self._check_result(call_info, 'compute_node_get',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_actions_get(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.actions_get(self.fake_context,
+ fake_instance)
+ expected_args = {'cell_name': 'region!child',
+ 'instance_uuid': fake_instance['uuid']}
+ self._check_result(call_info, 'actions_get', expected_args,
+ version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_actions_get_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.actions_get, self.fake_context,
+ fake_instance)
+
+ def test_action_get_by_request_id(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
+ fake_instance,
+ 'req-fake')
+ expected_args = {'cell_name': 'region!child',
+ 'instance_uuid': fake_instance['uuid'],
+ 'request_id': 'req-fake'}
+ self._check_result(call_info, 'action_get_by_request_id',
+ expected_args, version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_action_get_by_request_id_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.action_get_by_request_id,
+ self.fake_context, fake_instance, 'req-fake')
+
+ def test_action_events_get(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.action_events_get(self.fake_context,
+ fake_instance,
+ 'fake-action')
+ expected_args = {'cell_name': 'region!child',
+ 'action_id': 'fake-action'}
+ self._check_result(call_info, 'action_events_get', expected_args,
+ version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_action_events_get_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.action_events_get,
+ self.fake_context, fake_instance, 'fake-action')
+
+ def test_consoleauth_delete_tokens(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
+ 'fake-uuid')
+
+ expected_args = {'instance_uuid': 'fake-uuid'}
+ self._check_result(call_info, 'consoleauth_delete_tokens',
+ expected_args, version='1.6')
+
+ def test_validate_console_port(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.validate_console_port(self.fake_context,
+ 'fake-uuid', 'fake-port', 'fake-type')
+
+ expected_args = {'instance_uuid': 'fake-uuid',
+ 'console_port': 'fake-port',
+ 'console_type': 'fake-type'}
+ self._check_result(call_info, 'validate_console_port',
+ expected_args, version='1.6')
+ self.assertEqual(result, 'fake_response')
+
+ def test_bdm_update_or_create_at_top(self):
+ fake_bdm = {'id': 2, 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bdm_update_or_create_at_top(
+ self.fake_context, fake_bdm, create='fake-create')
+
+ expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
+ self._check_result(call_info, 'bdm_update_or_create_at_top',
+ expected_args, version='1.28')
+
+ def test_bdm_destroy_at_top(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
+ 'fake-uuid',
+ device_name='fake-device',
+ volume_id='fake-vol')
+
+ expected_args = {'instance_uuid': 'fake-uuid',
+ 'device_name': 'fake-device',
+ 'volume_id': 'fake-vol'}
+ self._check_result(call_info, 'bdm_destroy_at_top',
+ expected_args, version='1.10')
+
+ def test_get_migrations(self):
+ call_info = self._stub_rpc_method('call', None)
+ filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
+
+ self.cells_rpcapi.get_migrations(self.fake_context, filters)
+
+ expected_args = {'filters': filters}
+ self._check_result(call_info, 'get_migrations', expected_args,
+ version="1.11")
+
+ def test_instance_update_from_api(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_update_from_api(
+ self.fake_context, 'fake-instance',
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset='admin_reset')
+
+ expected_args = {'instance': 'fake-instance',
+ 'expected_vm_state': 'exp_vm',
+ 'expected_task_state': 'exp_task',
+ 'admin_state_reset': 'admin_reset'}
+ self._check_result(call_info, 'instance_update_from_api',
+ expected_args, version='1.16')
+
+ def test_start_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.start_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'start_instance',
+ expected_args, version='1.12')
+
+ def test_stop_instance_cast(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.stop_instance(
+ self.fake_context, 'fake-instance', do_cast=True)
+
+ expected_args = {'instance': 'fake-instance',
+ 'do_cast': True}
+ self._check_result(call_info, 'stop_instance',
+ expected_args, version='1.12')
+
+ def test_stop_instance_call(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.stop_instance(
+ self.fake_context, 'fake-instance', do_cast=False)
+
+ expected_args = {'instance': 'fake-instance',
+ 'do_cast': False}
+ self._check_result(call_info, 'stop_instance',
+ expected_args, version='1.12')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_create(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
+
+ expected_args = {'values': 'values'}
+ self._check_result(call_info, 'cell_create',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_update(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_update(self.fake_context,
+ 'cell_name', 'values')
+
+ expected_args = {'cell_name': 'cell_name',
+ 'values': 'values'}
+ self._check_result(call_info, 'cell_update',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_delete(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_delete(self.fake_context,
+ 'cell_name')
+
+ expected_args = {'cell_name': 'cell_name'}
+ self._check_result(call_info, 'cell_delete',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_get(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_get(self.fake_context,
+ 'cell_name')
+
+ expected_args = {'cell_name': 'cell_name'}
+ self._check_result(call_info, 'cell_get',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_reboot_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.reboot_instance(
+ self.fake_context, 'fake-instance',
+ block_device_info='ignored', reboot_type='HARD')
+
+ expected_args = {'instance': 'fake-instance',
+ 'reboot_type': 'HARD'}
+ self._check_result(call_info, 'reboot_instance',
+ expected_args, version='1.14')
+
+ def test_pause_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.pause_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'pause_instance',
+ expected_args, version='1.19')
+
+ def test_unpause_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.unpause_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'unpause_instance',
+ expected_args, version='1.19')
+
+ def test_suspend_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.suspend_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'suspend_instance',
+ expected_args, version='1.15')
+
+ def test_resume_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.resume_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'resume_instance',
+ expected_args, version='1.15')
+
+ def test_terminate_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.terminate_instance(self.fake_context,
+ 'fake-instance', [])
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'terminate_instance',
+ expected_args, version='1.18')
+
+ def test_soft_delete_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.soft_delete_instance(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'soft_delete_instance',
+ expected_args, version='1.18')
+
+ def test_resize_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.resize_instance(self.fake_context,
+ 'fake-instance',
+ dict(cow='moo'),
+ 'fake-hint',
+ 'fake-flavor',
+ 'fake-reservations')
+ expected_args = {'instance': 'fake-instance',
+ 'flavor': 'fake-flavor',
+ 'extra_instance_updates': dict(cow='moo')}
+ self._check_result(call_info, 'resize_instance',
+ expected_args, version='1.20')
+
+ def test_live_migrate_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.live_migrate_instance(self.fake_context,
+ 'fake-instance',
+ 'fake-host',
+ 'fake-block',
+ 'fake-commit')
+ expected_args = {'instance': 'fake-instance',
+ 'block_migration': 'fake-block',
+ 'disk_over_commit': 'fake-commit',
+ 'host_name': 'fake-host'}
+ self._check_result(call_info, 'live_migrate_instance',
+ expected_args, version='1.20')
+
+ def test_revert_resize(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.revert_resize(self.fake_context,
+ 'fake-instance',
+ 'fake-migration',
+ 'fake-dest',
+ 'resvs')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'revert_resize',
+ expected_args, version='1.21')
+
+ def test_confirm_resize(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.confirm_resize(self.fake_context,
+ 'fake-instance',
+ 'fake-migration',
+ 'fake-source',
+ 'resvs')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'confirm_resize',
+ expected_args, version='1.21')
+
+ def test_reset_network(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.reset_network(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'reset_network',
+ expected_args, version='1.22')
+
+ def test_inject_network_info(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.inject_network_info(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'inject_network_info',
+ expected_args, version='1.23')
+
+ def test_snapshot_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.snapshot_instance(self.fake_context,
+ 'fake-instance',
+ 'image-id')
+ expected_args = {'instance': 'fake-instance',
+ 'image_id': 'image-id'}
+ self._check_result(call_info, 'snapshot_instance',
+ expected_args, version='1.24')
+
+ def test_backup_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.backup_instance(self.fake_context,
+ 'fake-instance',
+ 'image-id',
+ 'backup-type',
+ 'rotation')
+ expected_args = {'instance': 'fake-instance',
+ 'image_id': 'image-id',
+ 'backup_type': 'backup-type',
+ 'rotation': 'rotation'}
+ self._check_result(call_info, 'backup_instance',
+ expected_args, version='1.24')
+
+ def test_set_admin_password(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.set_admin_password(self.fake_context,
+ 'fake-instance', 'fake-password')
+
+ expected_args = {'instance': 'fake-instance',
+ 'new_pass': 'fake-password'}
+ self._check_result(call_info, 'set_admin_password',
+ expected_args, version='1.29')
diff --git a/nova/tests/unit/cells/test_cells_scheduler.py b/nova/tests/unit/cells/test_cells_scheduler.py
new file mode 100644
index 0000000000..23a115eaa1
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_scheduler.py
@@ -0,0 +1,530 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsScheduler
+"""
+import copy
+import time
+
+from oslo.config import cfg
+
+from nova import block_device
+from nova.cells import filters
+from nova.cells import weights
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_instance
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
+CONF.import_opt('scheduler_filter_classes', 'nova.cells.scheduler',
+ group='cells')
+CONF.import_opt('scheduler_weight_classes', 'nova.cells.scheduler',
+ group='cells')
+
+
+class FakeFilterClass1(filters.BaseCellFilter):
+ pass
+
+
+class FakeFilterClass2(filters.BaseCellFilter):
+ pass
+
+
+class FakeWeightClass1(weights.BaseCellWeigher):
+ pass
+
+
+class FakeWeightClass2(weights.BaseCellWeigher):
+ pass
+
+
+class CellsSchedulerTestCase(test.TestCase):
+ """Test case for CellsScheduler class."""
+
+ def setUp(self):
+ super(CellsSchedulerTestCase, self).setUp()
+ self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
+ group='cells')
+ self._init_cells_scheduler()
+
+ def _init_cells_scheduler(self):
+ fakes.init(self)
+ self.msg_runner = fakes.get_message_runner('api-cell')
+ self.scheduler = self.msg_runner.scheduler
+ self.state_manager = self.msg_runner.state_manager
+ self.my_cell_state = self.state_manager.get_my_state()
+ self.ctxt = context.RequestContext('fake', 'fake')
+ instance_uuids = []
+ for x in xrange(3):
+ instance_uuids.append(uuidutils.generate_uuid())
+ self.instance_uuids = instance_uuids
+ self.instances = [{'uuid': uuid} for uuid in instance_uuids]
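+ # request_spec matches what the fake build_request_spec stubs in the
+ # filter/weigher tests below return; build_inst_kwargs matches the
+ # kwargs the tests pass through msg_runner.build_instances().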
+ self.request_spec = {
+ 'instance_uuids': instance_uuids,
+ 'instance_properties': self.instances[0],
+ 'instance_type': 'fake_type',
+ 'image': 'fake_image'}
+ self.build_inst_kwargs = {
+ 'instances': self.instances,
+ 'image': 'fake_image',
+ 'filter_properties': {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
+ def test_create_instances_here(self):
+ # Just grab the first instance type
+ inst_type = db.flavor_get(self.ctxt, 1)
+ image = {'properties': {}}
+ instance_uuids = self.instance_uuids
+ instance_props = {'id': 'removed',
+ 'security_groups': 'removed',
+ 'info_cache': 'removed',
+ 'name': 'instance-00000001',
+ 'hostname': 'meow',
+ 'display_name': 'moo',
+ 'image_ref': 'fake_image_ref',
+ 'user_id': self.ctxt.user_id,
+ # Test these as lists
+ 'metadata': [{'key': 'moo', 'value': 'cow'}],
+ 'system_metadata': [{'key': 'meow', 'value': 'cat'}],
+ 'project_id': self.ctxt.project_id}
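+ # The 'removed' placeholders mark fields that _create_instances_here
+ # is expected to strip before the instances are created in the DB.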
+
+ call_info = {'uuids': []}
+ block_device_mapping = [block_device.create_image_bdm(
+ 'fake_image_ref')]
+
+ def _fake_instance_update_at_top(_ctxt, instance):
+ call_info['uuids'].append(instance['uuid'])
+
+ self.stubs.Set(self.msg_runner, 'instance_update_at_top',
+ _fake_instance_update_at_top)
+
+ self.scheduler._create_instances_here(self.ctxt, instance_uuids,
+ instance_props, inst_type, image,
+ ['default'], block_device_mapping)
+ self.assertEqual(instance_uuids, call_info['uuids'])
+
+ for instance_uuid in instance_uuids:
+ instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
+ meta = utils.instance_meta(instance)
+ self.assertEqual('cow', meta['moo'])
+ sys_meta = utils.instance_sys_meta(instance)
+ self.assertEqual('cat', sys_meta['meow'])
+ self.assertEqual('meow', instance['hostname'])
+ self.assertEqual('moo-%s' % instance['uuid'],
+ instance['display_name'])
+ self.assertEqual('fake_image_ref', instance['image_ref'])
+
+ def test_build_instances_selects_child_cell(self):
+ # Make sure there's no capacity info so we're sure to
+ # select a child cell
+ our_cell_info = self.state_manager.get_my_state()
+ our_cell_info.capacities = {}
+
+ call_info = {'times': 0}
+
+ orig_fn = self.msg_runner.build_instances
+
+ def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
+ # This gets called twice: once when we run it in this cell,
+ # and again when the child cell is picked. So the first time,
+ # just run it as normal.
+ if not call_info['times']:
+ call_info['times'] += 1
+ return orig_fn(ctxt, target_cell, build_inst_kwargs)
+ call_info['ctxt'] = ctxt
+ call_info['target_cell'] = target_cell
+ call_info['build_inst_kwargs'] = build_inst_kwargs
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.msg_runner, 'build_instances',
+ msg_runner_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.build_inst_kwargs,
+ call_info['build_inst_kwargs'])
+ child_cells = self.state_manager.get_child_cells()
+ self.assertIn(call_info['target_cell'], child_cells)
+
+ def test_build_instances_selects_current_cell(self):
+ # Make sure there are no child cells so that this cell will be
+ # selected.
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+ build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+ instances = [fake_instance.fake_instance_obj(ctxt, **instance)
+ for instance in self.instances]
+ return instances
+
+ def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
+ call_info['build_inst_kwargs'] = build_inst_kwargs
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ build_inst_kwargs)
+
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.build_inst_kwargs['instances'][0],
+ call_info['instance_properties'])
+ self.assertEqual(
+ self.build_inst_kwargs['filter_properties']['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
+ self.assertEqual(self.build_inst_kwargs['security_groups'],
+ call_info['security_groups'])
+ self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
+ call_info['block_device_mapping'])
+ self.assertEqual(build_inst_kwargs,
+ call_info['build_inst_kwargs'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+
+ def test_build_instances_retries_when_no_cells_avail(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ call_info = {'num_tries': 0, 'errored_uuids': []}
+
+ def fake_grab_target_cells(filter_properties):
+ call_info['num_tries'] += 1
+ raise exception.NoCellsAvailable()
+
+ def fake_sleep(_secs):
+ return
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids'].append(instance_uuid)
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
+ self.stubs.Set(time, 'sleep', fake_sleep)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
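+ # One initial attempt plus scheduler_retries (7) retries.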
+ self.assertEqual(8, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
+
+ def test_schedule_method_on_random_exception(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ instances = [{'uuid': uuid} for uuid in self.instance_uuids]
+ method_kwargs = {
+ 'image': 'fake_image',
+ 'instances': instances,
+ 'filter_properties': {}}
+
+ call_info = {'num_tries': 0,
+ 'errored_uuids1': [],
+ 'errored_uuids2': []}
+
+ def fake_grab_target_cells(filter_properties):
+ call_info['num_tries'] += 1
+ raise test.TestingException()
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids1'].append(instance_uuid)
+
+ def fake_instance_update_at_top(ctxt, instance):
+ self.assertEqual(vm_states.ERROR, instance['vm_state'])
+ call_info['errored_uuids2'].append(instance['uuid'])
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(self.msg_runner, 'instance_update_at_top',
+ fake_instance_update_at_top)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ method_kwargs)
+ # Shouldn't retry
+ self.assertEqual(1, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
+
+ def test_filter_schedule_skipping(self):
+ # if a filter handles scheduling, short circuit
+
+ def _grab(filter_properties):
+ return None
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)
+
+ def _test(self, *args):
+ raise test.TestingException("shouldn't be called")
+
+ try:
+ self.scheduler._schedule_build_to_cells(None, None, None, _test,
+ None)
+ except test.TestingException:
+ self.fail("Scheduling did not properly short circuit")
+
+ def test_cells_filter_args_correct(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeFilterClass1',
+ our_path + '.' + 'FakeFilterClass2']
+ self.flags(scheduler_filter_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there are no child cells so that this cell will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+
+ def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ def fake_get_filtered_objs(filter_classes, cells, filt_properties):
+ call_info['filt_classes'] = filter_classes
+ call_info['filt_cells'] = cells
+ call_info['filt_props'] = filt_properties
+ return cells
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'instance_properties': instances[0],
+ 'image': image,
+ 'instance_type': 'fake_type'}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+ filter_handler = self.scheduler.filter_handler
+ self.stubs.Set(filter_handler, 'get_filtered_objects',
+ fake_get_filtered_objs)
+
+ host_sched_kwargs = {'image': 'fake_image',
+ 'instances': self.instances,
+ 'filter_properties':
+ {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+ # Our cell was selected.
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ # Filter args are correct
+ expected_filt_props = {'context': self.ctxt,
+ 'scheduler': self.scheduler,
+ 'routing_path': self.my_cell_state.name,
+ 'host_sched_kwargs': host_sched_kwargs,
+ 'request_spec': self.request_spec,
+ 'instance_type': 'fake_type'}
+ self.assertEqual(expected_filt_props, call_info['filt_props'])
+ self.assertEqual([FakeFilterClass1, FakeFilterClass2],
+ call_info['filt_classes'])
+ self.assertEqual([self.my_cell_state], call_info['filt_cells'])
+
+ def test_cells_filter_returning_none(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeFilterClass1',
+ our_path + '.' + 'FakeFilterClass2']
+ self.flags(scheduler_filter_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there are no child cells so that this cell will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {'scheduled': False}
+
+ def fake_create_instances_here(ctxt, request_spec):
+ # Should not be called
+ call_info['scheduled'] = True
+
+ def fake_get_filtered_objs(filter_classes, cells, filt_properties):
+ # Should cause scheduling to be skipped. Means that the
+ # filter did it.
+ return None
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ filter_handler = self.scheduler.filter_handler
+ self.stubs.Set(filter_handler, 'get_filtered_objects',
+ fake_get_filtered_objs)
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, {})
+ self.assertFalse(call_info['scheduled'])
+
+ def test_cells_weight_args_correct(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeWeightClass1',
+ our_path + '.' + 'FakeWeightClass2']
+ self.flags(scheduler_weight_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there are no child cells so that this cell will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+
+ def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ def fake_get_weighed_objs(weight_classes, cells, filt_properties):
+ call_info['weight_classes'] = weight_classes
+ call_info['weight_cells'] = cells
+ call_info['weight_props'] = filt_properties
+ return [weights.WeightedCell(cells[0], 0.0)]
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'instance_properties': instances[0],
+ 'image': image,
+ 'instance_type': 'fake_type'}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ weight_handler = self.scheduler.weight_handler
+ self.stubs.Set(weight_handler, 'get_weighed_objects',
+ fake_get_weighed_objs)
+
+ host_sched_kwargs = {'image': 'fake_image',
+ 'instances': self.instances,
+ 'filter_properties':
+ {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+ # Our cell was selected.
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ # Weight args are correct
+ expected_filt_props = {'context': self.ctxt,
+ 'scheduler': self.scheduler,
+ 'routing_path': self.my_cell_state.name,
+ 'host_sched_kwargs': host_sched_kwargs,
+ 'request_spec': self.request_spec,
+ 'instance_type': 'fake_type'}
+ self.assertEqual(expected_filt_props, call_info['weight_props'])
+ self.assertEqual([FakeWeightClass1, FakeWeightClass2],
+ call_info['weight_classes'])
+ self.assertEqual([self.my_cell_state], call_info['weight_cells'])
diff --git a/nova/tests/unit/cells/test_cells_state_manager.py b/nova/tests/unit/cells/test_cells_state_manager.py
new file mode 100644
index 0000000000..6c52448111
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_state_manager.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellStateManager
+"""
+
+import time
+
+import mock
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+import six
+
+from nova.cells import state
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.openstack.common import fileutils
+from nova import test
+
+FAKE_COMPUTES = [
+ ('host1', 1024, 100, 0, 0),
+ ('host2', 1024, 100, -1, -1),
+ ('host3', 1024, 100, 1024, 100),
+ ('host4', 1024, 100, 300, 30),
+]
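+# Each tuple is (host, total_memory_mb, total_disk_gb, free_ram_mb,
+# free_disk_gb), matching the arguments of _node() in
+# _fake_compute_node_get_all() below.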
+
+# NOTE(alaski): It's important to have multiple types that end up having the
+# same memory and disk requirements. So two types need the same first value,
+# and two need the second and third values to add up to the same thing.
+FAKE_ITYPES = [
+ (0, 0, 0),
+ (50, 12, 13),
+ (50, 2, 4),
+ (10, 20, 5),
+]
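+# Here the second and third types share memory_mb (50), while the second
+# and fourth request the same total disk (12 + 13 == 20 + 5 == 25 GB).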
+
+
+def _fake_compute_node_get_all(context):
+ def _node(host, total_mem, total_disk, free_mem, free_disk):
+ service = {'host': host, 'disabled': False}
+ return {'service': service,
+ 'memory_mb': total_mem,
+ 'local_gb': total_disk,
+ 'free_ram_mb': free_mem,
+ 'free_disk_gb': free_disk}
+
+ return [_node(*fake) for fake in FAKE_COMPUTES]
+
+
+def _fake_instance_type_all(context):
+ def _type(mem, root, eph):
+ return {'root_gb': root,
+ 'ephemeral_gb': eph,
+ 'memory_mb': mem}
+
+ return [_type(*fake) for fake in FAKE_ITYPES]
+
+
+class TestCellsStateManager(test.TestCase):
+
+ def setUp(self):
+ super(TestCellsStateManager, self).setUp()
+
+ self.stubs.Set(db, 'compute_node_get_all', _fake_compute_node_get_all)
+ self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all)
+
+ def test_cells_config_not_found(self):
+ self.flags(cells_config='no_such_file_exists.conf', group='cells')
+ e = self.assertRaises(cfg.ConfigFilesNotFoundError,
+ state.CellStateManager)
+ self.assertEqual(['no_such_file_exists.conf'], e.config_files)
+
+ @mock.patch.object(cfg.ConfigOpts, 'find_file')
+ @mock.patch.object(fileutils, 'read_cached_file')
+ def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
+ mock_find_file.return_value = "/etc/nova/cells.json"
+ mock_read_cached_file.return_value = (False, six.StringIO({}))
+ self.flags(cells_config='cells.json', group='cells')
+ manager = state.CellStateManager()
+ self.assertIsInstance(manager,
+ state.CellStateManagerFile)
+ self.assertRaises(exception.CellsUpdateUnsupported,
+ manager.cell_create, None, None)
+ self.assertRaises(exception.CellsUpdateUnsupported,
+ manager.cell_update, None, None, None)
+ self.assertRaises(exception.CellsUpdateUnsupported,
+ manager.cell_delete, None, None)
+
+ def test_dbmanager_returned(self):
+ self.assertIsInstance(state.CellStateManager(),
+ state.CellStateManagerDB)
+
+ def test_capacity_no_reserve(self):
+ # utilize entire cell
+ cap = self._capacity(0.0)
+
+ cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
+
+ cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
+
+ self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
+ self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
+
+ units = cell_free_ram / 50
+ self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
+
+ sz = 25 * 1024
+ units = 5 # 4 on host 3, 1 on host4
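+ # (free disk is reported in GB, so host3 has 100 * 1024 MB free and
+ # host4 has 30 * 1024 MB; only whole 25600 MB units are counted)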
+ self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
+
+ def test_capacity_full_reserve(self):
+ # reserve the entire cell. (utilize zero percent)
+ cap = self._capacity(100.0)
+
+ cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
+
+ cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
+
+ self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
+ self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
+ self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])
+
+ sz = 25 * 1024
+ self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])
+
+ def test_capacity_part_reserve(self):
+ # utilize half the cell's free capacity
+ cap = self._capacity(50.0)
+
+ cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
+
+ cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
+ self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
+
+ self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
+ self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
+
+ units = 10 # 10 from host 3
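+ # (with 50% of each host's total RAM reserved, only host3 keeps
+ # headroom: 1024 free - 512 reserved = 512 MB, i.e. ten 50 MB units)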
+ self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
+
+ sz = 25 * 1024
+ units = 2 # 2 on host 3
+ self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
+
+ def _get_state_manager(self, reserve_percent=0.0):
+ self.flags(reserve_percent=reserve_percent, group='cells')
+ return state.CellStateManager()
+
+ def _capacity(self, reserve_percent):
+ state_manager = self._get_state_manager(reserve_percent)
+ my_state = state_manager.get_my_state()
+ return my_state.capacities
+
+
+class TestCellStateManagerException(test.TestCase):
+ @mock.patch.object(time, 'sleep')
+ def test_init_db_error(self, mock_sleep):
+ class TestCellStateManagerDB(state.CellStateManagerDB):
+ def __init__(self):
+ self._cell_data_sync = mock.Mock()
+ self._cell_data_sync.side_effect = [db_exc.DBError(), []]
+ super(TestCellStateManagerDB, self).__init__()
+ test = TestCellStateManagerDB()
+ mock_sleep.assert_called_once_with(30)
+ self.assertEqual(test._cell_data_sync.call_count, 2)
+
+
+class TestCellsGetCapacity(TestCellsStateManager):
+ def setUp(self):
+ super(TestCellsGetCapacity, self).setUp()
+ self.capacities = {"ram_free": 1234}
+ self.state_manager = self._get_state_manager()
+ cell = models.Cell(name="cell_name")
+ other_cell = models.Cell(name="other_cell_name")
+ cell.capacities = self.capacities
+ other_cell.capacities = self.capacities
+ self.stubs.Set(self.state_manager, 'child_cells',
+ {"cell_name": cell,
+ "other_cell_name": other_cell})
+
+ def test_get_cell_capacity_for_all_cells(self):
+ self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
+ self.capacities)
+ capacities = self.state_manager.get_capacities()
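+ # 1234 from this cell plus 1234 from each of the two stubbed children.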
+ self.assertEqual({"ram_free": 3702}, capacities)
+
+ def test_get_cell_capacity_for_the_parent_cell(self):
+ self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
+ self.capacities)
+ capacities = self.state_manager.get_capacities(
+ self.state_manager.my_cell_state.name)
+ self.assertEqual({"ram_free": 3702}, capacities)
+
+ def test_get_cell_capacity_for_a_cell(self):
+ self.assertEqual(self.capacities,
+ self.state_manager.get_capacities(cell_name="cell_name"))
+
+ def test_get_cell_capacity_for_non_existing_cell(self):
+ self.assertRaises(exception.CellNotFound,
+ self.state_manager.get_capacities,
+ cell_name="invalid_cell_name")
+
+
+class FakeCellStateManager(object):
+ def __init__(self):
+ self.called = []
+
+ def _cell_data_sync(self, force=False):
+ self.called.append(('_cell_data_sync', force))
+
+
+class TestSyncDecorators(test.TestCase):
+ def test_sync_before(self):
+ manager = FakeCellStateManager()
+
+ def test(inst, *args, **kwargs):
+ self.assertEqual(inst, manager)
+ self.assertEqual(args, (1, 2, 3))
+ self.assertEqual(kwargs, dict(a=4, b=5, c=6))
+ return 'result'
+ wrapper = state.sync_before(test)
+
+ result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
+
+ self.assertEqual(result, 'result')
+ self.assertEqual(manager.called, [('_cell_data_sync', False)])
+
+ def test_sync_after(self):
+ manager = FakeCellStateManager()
+
+ def test(inst, *args, **kwargs):
+ self.assertEqual(inst, manager)
+ self.assertEqual(args, (1, 2, 3))
+ self.assertEqual(kwargs, dict(a=4, b=5, c=6))
+ return 'result'
+ wrapper = state.sync_after(test)
+
+ result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
+
+ self.assertEqual(result, 'result')
+ self.assertEqual(manager.called, [('_cell_data_sync', True)])
diff --git a/nova/tests/unit/cells/test_cells_utils.py b/nova/tests/unit/cells/test_cells_utils.py
new file mode 100644
index 0000000000..44141150b6
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_utils.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Utility methods
+"""
+import inspect
+import random
+
+from nova.cells import utils as cells_utils
+from nova import db
+from nova import test
+
+
+class CellsUtilsTestCase(test.NoDBTestCase):
+ """Test case for Cells utility methods."""
+ def test_get_instances_to_sync(self):
+ fake_context = 'fake_context'
+
+ call_info = {'get_all': 0, 'shuffle': 0}
+
+ def random_shuffle(_list):
+ call_info['shuffle'] += 1
+
+ def instance_get_all_by_filters(context, filters,
+ sort_key, sort_order):
+ self.assertEqual(context, fake_context)
+ self.assertEqual(sort_key, 'deleted')
+ self.assertEqual(sort_order, 'asc')
+ call_info['got_filters'] = filters
+ call_info['get_all'] += 1
+ return ['fake_instance1', 'fake_instance2', 'fake_instance3']
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ instance_get_all_by_filters)
+ self.stubs.Set(random, 'shuffle', random_shuffle)
+
+ instances = cells_utils.get_instances_to_sync(fake_context)
+ self.assertTrue(inspect.isgenerator(instances))
+ self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 1)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 0)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+ self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 2)
+ self.assertEqual(call_info['got_filters'], {})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ updated_since='fake-updated-since')
+ self.assertTrue(inspect.isgenerator(instances))
+ self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 3)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since'})
+ self.assertEqual(call_info['shuffle'], 1)
+
+ instances = cells_utils.get_instances_to_sync(fake_context,
+ project_id='fake-project',
+ updated_since='fake-updated-since', shuffle=True)
+ self.assertTrue(inspect.isgenerator(instances))
+ self.assertEqual(len([x for x in instances]), 3)
+ self.assertEqual(call_info['get_all'], 4)
+ self.assertEqual(call_info['got_filters'],
+ {'changes-since': 'fake-updated-since',
+ 'project_id': 'fake-project'})
+ self.assertEqual(call_info['shuffle'], 2)
+
+ def test_split_cell_and_item(self):
+ path = 'australia', 'queensland', 'gold_coast'
+ cell = cells_utils.PATH_CELL_SEP.join(path)
+ item = 'host_5'
+ together = cells_utils.cell_with_item(cell, item)
+ self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]),
+ together)
+
+ # Test normal usage
+ result_cell, result_item = cells_utils.split_cell_and_item(together)
+ self.assertEqual(cell, result_cell)
+ self.assertEqual(item, result_item)
+
+ # Test with no cell
+ cell = None
+ together = cells_utils.cell_with_item(cell, item)
+ self.assertEqual(item, together)
+ result_cell, result_item = cells_utils.split_cell_and_item(together)
+ self.assertEqual(cell, result_cell)
+ self.assertEqual(item, result_item)
diff --git a/nova/tests/unit/cells/test_cells_weights.py b/nova/tests/unit/cells/test_cells_weights.py
new file mode 100644
index 0000000000..5f0a0ac783
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_weights.py
@@ -0,0 +1,218 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for the cells weight algorithms.
+
+Cells with higher weights should be given priority for new builds.
+"""
+
+import datetime
+
+from oslo.utils import timeutils
+
+from nova.cells import state
+from nova.cells import weights
+from nova import test
+
+
+class FakeCellState(state.CellState):
+ def __init__(self, cell_name):
+ super(FakeCellState, self).__init__(cell_name)
+ self.capacities['ram_free'] = {'total_mb': 0,
+ 'units_by_mb': {}}
+ self.db_info = {}
+
+ def _update_ram_free(self, *args):
+ ram_free = self.capacities['ram_free']
+ for ram_size, units in args:
+ ram_free['total_mb'] += units * ram_size
+ ram_free['units_by_mb'][str(ram_size)] = units
+
+
+def _get_fake_cells():
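+ # Free-RAM units and weight offsets are staggered so each weigher test
+ # has a distinct expected ordering: cell4 has the most free 512 MB
+ # units, cell1 the most 1024 MB units, cell2 the most 2048 MB units,
+ # and cell3 the highest weight_offset.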
+
+ cell1 = FakeCellState('cell1')
+ cell1._update_ram_free((512, 1), (1024, 4), (2048, 3))
+ cell1.db_info['weight_offset'] = -200.0
+ cell2 = FakeCellState('cell2')
+ cell2._update_ram_free((512, 2), (1024, 3), (2048, 4))
+ cell2.db_info['weight_offset'] = -200.1
+ cell3 = FakeCellState('cell3')
+ cell3._update_ram_free((512, 3), (1024, 2), (2048, 1))
+ cell3.db_info['weight_offset'] = 400.0
+ cell4 = FakeCellState('cell4')
+ cell4._update_ram_free((512, 4), (1024, 1), (2048, 2))
+ cell4.db_info['weight_offset'] = 300.0
+
+ return [cell1, cell2, cell3, cell4]
+
+
+class CellsWeightsTestCase(test.NoDBTestCase):
+ """Makes sure the proper weighers are in the directory."""
+
+ def test_all_weighers(self):
+ weighers = weights.all_weighers()
+ # Check that at least a couple of the expected weighers are present
+ self.assertTrue(len(weighers) >= 2)
+ class_names = [cls.__name__ for cls in weighers]
+ self.assertIn('WeightOffsetWeigher', class_names)
+ self.assertIn('RamByInstanceTypeWeigher', class_names)
+
+
+class _WeigherTestClass(test.NoDBTestCase):
+ """Base class for testing individual weigher plugins."""
+ weigher_cls_name = None
+
+ def setUp(self):
+ super(_WeigherTestClass, self).setUp()
+ self.weight_handler = weights.CellWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ [self.weigher_cls_name])
+
+ def _get_weighed_cells(self, cells, weight_properties):
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ cells, weight_properties)
+
+
+class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass):
+
+ weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.'
+ 'RamByInstanceTypeWeigher')
+
+ def test_default_spreading(self):
+ """Test that cells with more ram available return a higher weight."""
+ cells = _get_fake_cells()
+ # Simulate building a new 512MB instance.
+ instance_type = {'memory_mb': 512}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[3], cells[2], cells[1], cells[0]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+ # Simulate building a new 1024MB instance.
+ instance_type = {'memory_mb': 1024}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[0], cells[1], cells[2], cells[3]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+ # Simulate building a new 2048MB instance.
+ instance_type = {'memory_mb': 2048}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[1], cells[0], cells[3], cells[2]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+ def test_negative_multiplier(self):
+ """Test that cells with less ram available return a higher weight."""
+ self.flags(ram_weight_multiplier=-1.0, group='cells')
+ cells = _get_fake_cells()
+ # Simulate building a new 512MB instance.
+ instance_type = {'memory_mb': 512}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[0], cells[1], cells[2], cells[3]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+ # Simulate building a new 1024MB instance.
+ instance_type = {'memory_mb': 1024}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[3], cells[2], cells[1], cells[0]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+ # Simulate building a new 2048MB instance.
+ instance_type = {'memory_mb': 2048}
+ weight_properties = {'request_spec': {'instance_type': instance_type}}
+ weighed_cells = self._get_weighed_cells(cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ expected_cells = [cells[2], cells[3], cells[0], cells[1]]
+ self.assertEqual(expected_cells, resulting_cells)
+
+
+class WeightOffsetWeigherTestClass(_WeigherTestClass):
+ """Test the RAMWeigher class."""
+ weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'
+
+ def test_weight_offset(self):
+ """Test that cells with higher weight_offsets return higher
+ weights.
+ """
+ cells = _get_fake_cells()
+ weighed_cells = self._get_weighed_cells(cells, {})
+ self.assertEqual(len(weighed_cells), 4)
+ expected_cells = [cells[2], cells[3], cells[0], cells[1]]
+ resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
+ self.assertEqual(expected_cells, resulting_cells)
+
+
+class MuteWeigherTestClass(_WeigherTestClass):
+ weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher'
+
+ def setUp(self):
+ super(MuteWeigherTestClass, self).setUp()
+ self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100,
+ mute_weight_value=1000.0, group='cells')
+
+ self.now = timeutils.utcnow()
+ timeutils.set_time_override(self.now)
+
+ self.cells = _get_fake_cells()
+ for cell in self.cells:
+ cell.last_seen = self.now
+
+ def tearDown(self):
+ super(MuteWeigherTestClass, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_non_mute(self):
+ weight_properties = {}
+ weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+
+ for weighed_cell in weighed_cells:
+ self.assertEqual(0, weighed_cell.weight)
+
+ def test_mutes(self):
+ # make 2 of them mute:
+ self.cells[0].last_seen = (self.cells[0].last_seen -
+ datetime.timedelta(seconds=200))
+ self.cells[1].last_seen = (self.cells[1].last_seen -
+ datetime.timedelta(seconds=200))
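+ # (mute_child_interval is 100 seconds, so a last_seen 200 seconds in
+ # the past marks a cell as mute)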
+
+ weight_properties = {}
+ weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
+ self.assertEqual(len(weighed_cells), 4)
+
+ for i in range(2):
+ weighed_cell = weighed_cells.pop(0)
+ self.assertEqual(0, weighed_cell.weight)
+ self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4'])
+
+ for i in range(2):
+ weighed_cell = weighed_cells.pop(0)
+ self.assertEqual(-10.0, weighed_cell.weight)
+ self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2'])
diff --git a/nova/tests/unit/cert/__init__.py b/nova/tests/unit/cert/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/cert/__init__.py
diff --git a/nova/tests/unit/cert/test_rpcapi.py b/nova/tests/unit/cert/test_rpcapi.py
new file mode 100644
index 0000000000..ee20c477cd
--- /dev/null
+++ b/nova/tests/unit/cert/test_rpcapi.py
@@ -0,0 +1,123 @@
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.cert.rpcapi
+"""
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+
+from nova.cert import rpcapi as cert_rpcapi
+from nova import context
+from nova import test
+
+CONF = cfg.CONF
+
+
+class CertRpcAPITestCase(test.NoDBTestCase):
+ def _test_cert_api(self, method, **kwargs):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = cert_rpcapi.CertAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.cert_topic)
+
+ orig_prepare = rpcapi.client.prepare
+ expected_version = kwargs.pop('version', rpcapi.client.target.version)
+
+ with contextlib.nested(
+ mock.patch.object(rpcapi.client, 'call'),
+ mock.patch.object(rpcapi.client, 'prepare'),
+ mock.patch.object(rpcapi.client, 'can_send_version'),
+ ) as (
+ rpc_mock, prepare_mock, csv_mock
+ ):
+ prepare_mock.return_value = rpcapi.client
+ rpc_mock.return_value = 'foo'
+ csv_mock.side_effect = (
+ lambda v: orig_prepare(version=v).can_send_version())
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, rpc_mock.return_value)
+
+ prepare_mock.assert_called_once_with(version=expected_version)
+ rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
+
+ def test_revoke_certs_by_user(self):
+ self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id',
+ version='1.0')
+
+ def test_revoke_certs_by_project(self):
+ self._test_cert_api('revoke_certs_by_project',
+ project_id='fake_project_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('revoke_certs_by_project',
+ project_id='fake_project_id', version='1.0')
+
+ def test_revoke_certs_by_user_and_project(self):
+ self._test_cert_api('revoke_certs_by_user_and_project',
+ user_id='fake_user_id',
+ project_id='fake_project_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('revoke_certs_by_user_and_project',
+ user_id='fake_user_id',
+ project_id='fake_project_id', version='1.0')
+
+ def test_generate_x509_cert(self):
+ self._test_cert_api('generate_x509_cert',
+ user_id='fake_user_id',
+ project_id='fake_project_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('generate_x509_cert',
+ user_id='fake_user_id',
+ project_id='fake_project_id', version='1.0')
+
+ def test_fetch_ca(self):
+ self._test_cert_api('fetch_ca', project_id='fake_project_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('fetch_ca', project_id='fake_project_id',
+ version='1.0')
+
+ def test_fetch_crl(self):
+ self._test_cert_api('fetch_crl', project_id='fake_project_id')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('fetch_crl', project_id='fake_project_id',
+ version='1.0')
+
+ def test_decrypt_text(self):
+ self._test_cert_api('decrypt_text',
+ project_id='fake_project_id', text='blah')
+
+ # NOTE(russellb) Havana compat
+ self.flags(cert='havana', group='upgrade_levels')
+ self._test_cert_api('decrypt_text',
+ project_id='fake_project_id', text='blah',
+ version='1.0')
diff --git a/nova/tests/unit/cmd/__init__.py b/nova/tests/unit/cmd/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/cmd/__init__.py
diff --git a/nova/tests/unit/cmd/test_idmapshift.py b/nova/tests/unit/cmd/test_idmapshift.py
new file mode 100644
index 0000000000..2f0fe06bc0
--- /dev/null
+++ b/nova/tests/unit/cmd/test_idmapshift.py
@@ -0,0 +1,636 @@
+# Copyright 2014 Rackspace, Andrew Melton
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import mock
+
+from nova.cmd import idmapshift
+from nova import test
+
+
+def join_side_effect(root, *args):
+ path = root
+ if root != '/':
+ path += '/'
+ path += '/'.join(args)
+ return path
+
+
+class FakeStat(object):
+ def __init__(self, uid, gid):
+ self.st_uid = uid
+ self.st_gid = gid
+
+
+class BaseTestCase(test.NoDBTestCase):
+ def __init__(self, *args, **kwargs):
+ super(BaseTestCase, self).__init__(*args, **kwargs)
+ self.uid_maps = [(0, 10000, 10), (10, 20000, 1000)]
+ self.gid_maps = [(0, 10000, 10), (10, 20000, 1000)]
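+ # Each map is (guest_id_start, host_id_start, count): guest id 0 maps
+ # to host id 10000 and guest id 10 maps to host id 20000.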
+
+
+class FindTargetIDTestCase(BaseTestCase):
+ def test_find_target_id_range_1_first(self):
+ actual_target = idmapshift.find_target_id(0, self.uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(10000, actual_target)
+
+ def test_find_target_id_inside_range_1(self):
+ actual_target = idmapshift.find_target_id(2, self.uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(10002, actual_target)
+
+ def test_find_target_id_range_2_first(self):
+ actual_target = idmapshift.find_target_id(10, self.uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(20000, actual_target)
+
+ def test_find_target_id_inside_range_2(self):
+ actual_target = idmapshift.find_target_id(100, self.uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(20090, actual_target)
+
+ def test_find_target_id_outside_range(self):
+ actual_target = idmapshift.find_target_id(10000, self.uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(idmapshift.NOBODY_ID, actual_target)
+
+ def test_find_target_id_no_mappings(self):
+ actual_target = idmapshift.find_target_id(0, [],
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(idmapshift.NOBODY_ID, actual_target)
+
+ def test_find_target_id_updates_memo(self):
+ memo = dict()
+ idmapshift.find_target_id(0, self.uid_maps, idmapshift.NOBODY_ID, memo)
+ self.assertTrue(0 in memo)
+ self.assertEqual(10000, memo[0])
+
+ def test_find_target_guest_id_greater_than_count(self):
+ uid_maps = [(500, 10000, 10)]
+
+ # Below range
+ actual_target = idmapshift.find_target_id(499, uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(idmapshift.NOBODY_ID, actual_target)
+
+ # Match
+ actual_target = idmapshift.find_target_id(501, uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(10001, actual_target)
+
+ # Beyond range
+ actual_target = idmapshift.find_target_id(510, uid_maps,
+ idmapshift.NOBODY_ID, dict())
+ self.assertEqual(idmapshift.NOBODY_ID, actual_target)
+
+
+class ShiftPathTestCase(BaseTestCase):
+ @mock.patch('os.lchown')
+ @mock.patch('os.lstat')
+ def test_shift_path(self, mock_lstat, mock_lchown):
+ mock_lstat.return_value = FakeStat(0, 0)
+ idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, dict(), dict())
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)])
+
+ @mock.patch('os.lchown')
+ @mock.patch('os.lstat')
+ def test_shift_path_dry_run(self, mock_lstat, mock_lchown):
+ mock_lstat.return_value = FakeStat(0, 0)
+ idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, dict(), dict(),
+ dry_run=True)
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertEqual(0, len(mock_lchown.mock_calls))
+
+ @mock.patch('os.lchown')
+ @mock.patch('nova.cmd.idmapshift.print_chown')
+ @mock.patch('os.lstat')
+ def test_shift_path_verbose(self, mock_lstat, mock_print, mock_lchown):
+ mock_lstat.return_value = FakeStat(0, 0)
+ idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, dict(), dict(),
+ verbose=True)
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ mock_print_call = mock.call('/test/path', 0, 0, 10000, 10000)
+ mock_print.assert_has_calls([mock_print_call])
+ mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)])
+
+
+class ShiftDirTestCase(BaseTestCase):
+ @mock.patch('nova.cmd.idmapshift.shift_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_shift_dir(self, mock_walk, mock_join, mock_shift_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+
+ idmapshift.shift_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID)
+
+ files = ['a', 'b', 'c', 'd']
+ mock_walk.assert_has_calls([mock.call('/')])
+ mock_join_calls = [mock.call('/', x) for x in files]
+ mock_join.assert_has_calls(mock_join_calls)
+
+ args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID)
+ kwargs = dict(dry_run=False, verbose=False,
+ uid_memo=dict(), gid_memo=dict())
+ shift_path_calls = [mock.call('/', *args, **kwargs)]
+ shift_path_calls += [mock.call('/' + x, *args, **kwargs)
+ for x in files]
+ mock_shift_path.assert_has_calls(shift_path_calls)
+
+ @mock.patch('nova.cmd.idmapshift.shift_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_shift_dir_dry_run(self, mock_walk, mock_join, mock_shift_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+
+ idmapshift.shift_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, dry_run=True)
+
+ mock_walk.assert_has_calls([mock.call('/')])
+
+ files = ['a', 'b', 'c', 'd']
+ mock_join_calls = [mock.call('/', x) for x in files]
+ mock_join.assert_has_calls(mock_join_calls)
+
+ args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID)
+ kwargs = dict(dry_run=True, verbose=False,
+ uid_memo=dict(), gid_memo=dict())
+ shift_path_calls = [mock.call('/', *args, **kwargs)]
+ shift_path_calls += [mock.call('/' + x, *args, **kwargs)
+ for x in files]
+ mock_shift_path.assert_has_calls(shift_path_calls)
+
+
+class ConfirmPathTestCase(test.NoDBTestCase):
+ @mock.patch('os.lstat')
+ def test_confirm_path(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(1000, 301)
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertTrue(result)
+
+ @mock.patch('os.lstat')
+ def test_confirm_path_nobody(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(50000, 50000)
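+ # ids already equal to the nobody id count as shifted, so confirm_path
+ # still returns True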
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertTrue(result)
+
+ @mock.patch('os.lstat')
+ def test_confirm_path_uid_mismatch(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(0, 301)
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertFalse(result)
+
+ @mock.patch('os.lstat')
+ def test_confirm_path_gid_mismatch(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(1000, 0)
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertFalse(result)
+
+ @mock.patch('os.lstat')
+ def test_confirm_path_uid_nobody(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(50000, 301)
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertTrue(result)
+
+ @mock.patch('os.lstat')
+ def test_confirm_path_gid_nobody(self, mock_lstat):
+ uid_ranges = [(1000, 1999)]
+ gid_ranges = [(300, 399)]
+ mock_lstat.return_value = FakeStat(1000, 50000)
+
+ result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
+ 50000)
+
+ mock_lstat.assert_has_calls([mock.call('/test/path')])
+ self.assertTrue(result)
+
+
+class ConfirmDirTestCase(BaseTestCase):
+ def setUp(self):
+ super(ConfirmDirTestCase, self).setUp()
+ self.uid_map_ranges = idmapshift.get_ranges(self.uid_maps)
+ self.gid_map_ranges = idmapshift.get_ranges(self.gid_maps)
+
+ @mock.patch('nova.cmd.idmapshift.confirm_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_confirm_dir(self, mock_walk, mock_join, mock_confirm_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+ mock_confirm_path.return_value = True
+
+ idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID)
+
+ files = ['a', 'b', 'c', 'd']
+ mock_walk.assert_has_calls([mock.call('/')])
+ mock_join_calls = [mock.call('/', x) for x in files]
+ mock_join.assert_has_calls(mock_join_calls)
+
+ args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
+ confirm_path_calls = [mock.call('/', *args)]
+ confirm_path_calls += [mock.call('/' + x, *args)
+ for x in files]
+ mock_confirm_path.assert_has_calls(confirm_path_calls)
+
+ @mock.patch('nova.cmd.idmapshift.confirm_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_confirm_dir_short_circuit_root(self, mock_walk, mock_join,
+ mock_confirm_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+ mock_confirm_path.return_value = False
+
+ idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID)
+
+ args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
+ confirm_path_calls = [mock.call('/', *args)]
+ mock_confirm_path.assert_has_calls(confirm_path_calls)
+
+ @mock.patch('nova.cmd.idmapshift.confirm_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_confirm_dir_short_circuit_file(self, mock_walk, mock_join,
+ mock_confirm_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+
+ def confirm_path_side_effect(path, *args):
+ if 'a' in path:
+ return False
+ return True
+
+ mock_confirm_path.side_effect = confirm_path_side_effect
+
+ idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID)
+
+ mock_walk.assert_has_calls([mock.call('/')])
+ mock_join.assert_has_calls([mock.call('/', 'a')])
+
+ args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
+ confirm_path_calls = [mock.call('/', *args),
+ mock.call('/' + 'a', *args)]
+ mock_confirm_path.assert_has_calls(confirm_path_calls)
+
+ @mock.patch('nova.cmd.idmapshift.confirm_path')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_confirm_dir_short_circuit_dir(self, mock_walk, mock_join,
+ mock_confirm_path):
+ mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
+ mock_join.side_effect = join_side_effect
+
+ def confirm_path_side_effect(path, *args):
+ if 'c' in path:
+ return False
+ return True
+
+ mock_confirm_path.side_effect = confirm_path_side_effect
+
+ idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID)
+
+ files = ['a', 'b', 'c']
+ mock_walk.assert_has_calls([mock.call('/')])
+ mock_join_calls = [mock.call('/', x) for x in files]
+ mock_join.assert_has_calls(mock_join_calls)
+
+ args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
+ confirm_path_calls = [mock.call('/', *args)]
+ confirm_path_calls += [mock.call('/' + x, *args)
+ for x in files]
+ mock_confirm_path.assert_has_calls(confirm_path_calls)
+
+
+class IDMapTypeTestCase(test.NoDBTestCase):
+ def test_id_map_type(self):
+ result = idmapshift.id_map_type("1:1:1,2:2:2")
+ self.assertEqual([(1, 1, 1), (2, 2, 2)], result)
+
+ def test_id_map_type_not_int(self):
+ self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type,
+ "a:1:1")
+
+ def test_id_map_type_not_proper_format(self):
+ self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type,
+ "1:1")
+
+
+class MainTestCase(BaseTestCase):
+ @mock.patch('nova.cmd.idmapshift.shift_dir')
+ @mock.patch('argparse.ArgumentParser')
+ def test_main(self, mock_parser_class, mock_shift_dir):
+ mock_parser = mock.MagicMock()
+ mock_parser.parse_args.return_value = mock_parser
+ mock_parser.idempotent = False
+ mock_parser.confirm = False
+ mock_parser.path = '/test/path'
+ mock_parser.uid = self.uid_maps
+ mock_parser.gid = self.gid_maps
+ mock_parser.nobody = idmapshift.NOBODY_ID
+ mock_parser.dry_run = False
+ mock_parser.verbose = False
+ mock_parser_class.return_value = mock_parser
+
+ idmapshift.main()
+
+ mock_shift_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID,
+ dry_run=False, verbose=False)
+ mock_shift_dir.assert_has_calls([mock_shift_dir_call])
+
+ @mock.patch('nova.cmd.idmapshift.shift_dir')
+ @mock.patch('nova.cmd.idmapshift.confirm_dir')
+ @mock.patch('argparse.ArgumentParser')
+ def test_main_confirm_dir_idempotent_unshifted(self, mock_parser_class,
+ mock_confirm_dir,
+ mock_shift_dir):
+ mock_parser = mock.MagicMock()
+ mock_parser.parse_args.return_value = mock_parser
+ mock_parser.idempotent = True
+ mock_parser.confirm = False
+ mock_parser.path = '/test/path'
+ mock_parser.uid = self.uid_maps
+ mock_parser.gid = self.gid_maps
+ mock_parser.nobody = idmapshift.NOBODY_ID
+ mock_parser.dry_run = False
+ mock_parser.verbose = False
+ mock_parser_class.return_value = mock_parser
+ mock_confirm_dir.return_value = False
+
+ idmapshift.main()
+
+ mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+ mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
+ mock_shift_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID,
+ dry_run=False, verbose=False)
+ mock_shift_dir.assert_has_calls([mock_shift_dir_call])
+
+ @mock.patch('nova.cmd.idmapshift.shift_dir')
+ @mock.patch('nova.cmd.idmapshift.confirm_dir')
+ @mock.patch('argparse.ArgumentParser')
+ def test_main_confirm_dir_idempotent_shifted(self, mock_parser_class,
+ mock_confirm_dir,
+ mock_shift_dir):
+ mock_parser = mock.MagicMock()
+ mock_parser.parse_args.return_value = mock_parser
+ mock_parser.idempotent = True
+ mock_parser.confirm = False
+ mock_parser.path = '/test/path'
+ mock_parser.uid = self.uid_maps
+ mock_parser.gid = self.gid_maps
+ mock_parser.nobody = idmapshift.NOBODY_ID
+ mock_parser.dry_run = False
+ mock_parser.verbose = False
+ mock_parser_class.return_value = mock_parser
+ mock_confirm_dir.return_value = True
+
+ try:
+ idmapshift.main()
+ except SystemExit as sys_exit:
+ self.assertEqual(sys_exit.code, 0)
+
+ mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+ mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
+        self.assertEqual(0, len(mock_shift_dir.mock_calls))
+
+ @mock.patch('nova.cmd.idmapshift.shift_dir')
+ @mock.patch('nova.cmd.idmapshift.confirm_dir')
+ @mock.patch('argparse.ArgumentParser')
+ def test_main_confirm_dir_confirm_unshifted(self, mock_parser_class,
+ mock_confirm_dir,
+ mock_shift_dir):
+ mock_parser = mock.MagicMock()
+ mock_parser.parse_args.return_value = mock_parser
+ mock_parser.idempotent = False
+ mock_parser.confirm = True
+ mock_parser.exit_on_fail = True
+ mock_parser.path = '/test/path'
+ mock_parser.uid = self.uid_maps
+ mock_parser.gid = self.gid_maps
+ mock_parser.nobody = idmapshift.NOBODY_ID
+ mock_parser.dry_run = False
+ mock_parser.verbose = False
+ mock_parser_class.return_value = mock_parser
+ mock_confirm_dir.return_value = False
+
+ try:
+ idmapshift.main()
+ except SystemExit as sys_exit:
+ self.assertEqual(sys_exit.code, 1)
+
+ mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+ mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
+        self.assertEqual(0, len(mock_shift_dir.mock_calls))
+
+ @mock.patch('nova.cmd.idmapshift.shift_dir')
+ @mock.patch('nova.cmd.idmapshift.confirm_dir')
+ @mock.patch('argparse.ArgumentParser')
+ def test_main_confirm_dir_confirm_shifted(self, mock_parser_class,
+ mock_confirm_dir,
+ mock_shift_dir):
+ mock_parser = mock.MagicMock()
+ mock_parser.parse_args.return_value = mock_parser
+ mock_parser.idempotent = False
+ mock_parser.confirm = True
+ mock_parser.exit_on_fail = True
+ mock_parser.path = '/test/path'
+ mock_parser.uid = self.uid_maps
+ mock_parser.gid = self.gid_maps
+ mock_parser.nobody = idmapshift.NOBODY_ID
+ mock_parser.dry_run = False
+ mock_parser.verbose = False
+ mock_parser_class.return_value = mock_parser
+ mock_confirm_dir.return_value = True
+
+ try:
+ idmapshift.main()
+ except SystemExit as sys_exit:
+ self.assertEqual(sys_exit.code, 0)
+
+ mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+ mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
+        self.assertEqual(0, len(mock_shift_dir.mock_calls))
+
+
+class IntegrationTestCase(BaseTestCase):
+ @mock.patch('os.lchown')
+ @mock.patch('os.lstat')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_integrated_shift_dir(self, mock_walk, mock_join, mock_lstat,
+ mock_lchown):
+ mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
+ ('/tmp/test/d', ['1', '2'], [])]
+ mock_join.side_effect = join_side_effect
+
+ def lstat(path):
+ stats = {
+ 't': FakeStat(0, 0),
+ 'a': FakeStat(0, 0),
+ 'b': FakeStat(0, 2),
+ 'c': FakeStat(30000, 30000),
+ 'd': FakeStat(100, 100),
+ '1': FakeStat(0, 100),
+ '2': FakeStat(100, 100),
+ }
+ return stats[path[-1]]
+
+ mock_lstat.side_effect = lstat
+
+ idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, verbose=True)
+
+ lchown_calls = [
+ mock.call('/tmp/test', 10000, 10000),
+ mock.call('/tmp/test/a', 10000, 10000),
+ mock.call('/tmp/test/b', 10000, 10002),
+ mock.call('/tmp/test/c', idmapshift.NOBODY_ID,
+ idmapshift.NOBODY_ID),
+ mock.call('/tmp/test/d', 20090, 20090),
+ mock.call('/tmp/test/d/1', 10000, 20090),
+ mock.call('/tmp/test/d/2', 20090, 20090),
+ ]
+ mock_lchown.assert_has_calls(lchown_calls)
+
+ @mock.patch('os.lchown')
+ @mock.patch('os.lstat')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_integrated_shift_dir_dry_run(self, mock_walk, mock_join,
+ mock_lstat, mock_lchown):
+ mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
+ ('/tmp/test/d', ['1', '2'], [])]
+ mock_join.side_effect = join_side_effect
+
+ def lstat(path):
+ stats = {
+ 't': FakeStat(0, 0),
+ 'a': FakeStat(0, 0),
+ 'b': FakeStat(0, 2),
+ 'c': FakeStat(30000, 30000),
+ 'd': FakeStat(100, 100),
+ '1': FakeStat(0, 100),
+ '2': FakeStat(100, 100),
+ }
+ return stats[path[-1]]
+
+ mock_lstat.side_effect = lstat
+
+ idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps,
+ idmapshift.NOBODY_ID, dry_run=True, verbose=True)
+
+ self.assertEqual(0, len(mock_lchown.mock_calls))
+
+ @mock.patch('os.lstat')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_integrated_confirm_dir_shifted(self, mock_walk, mock_join,
+ mock_lstat):
+ mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
+ ('/tmp/test/d', ['1', '2'], [])]
+ mock_join.side_effect = join_side_effect
+
+ def lstat(path):
+ stats = {
+ 't': FakeStat(10000, 10000),
+ 'a': FakeStat(10000, 10000),
+ 'b': FakeStat(10000, 10002),
+ 'c': FakeStat(idmapshift.NOBODY_ID, idmapshift.NOBODY_ID),
+ 'd': FakeStat(20090, 20090),
+ '1': FakeStat(10000, 20090),
+ '2': FakeStat(20090, 20090),
+ }
+ return stats[path[-1]]
+
+ mock_lstat.side_effect = lstat
+
+ result = idmapshift.confirm_dir('/tmp/test', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+
+ self.assertTrue(result)
+
+ @mock.patch('os.lstat')
+ @mock.patch('os.path.join')
+ @mock.patch('os.walk')
+ def test_integrated_confirm_dir_unshifted(self, mock_walk, mock_join,
+ mock_lstat):
+ mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
+ ('/tmp/test/d', ['1', '2'], [])]
+ mock_join.side_effect = join_side_effect
+
+ def lstat(path):
+ stats = {
+ 't': FakeStat(0, 0),
+ 'a': FakeStat(0, 0),
+ 'b': FakeStat(0, 2),
+ 'c': FakeStat(30000, 30000),
+ 'd': FakeStat(100, 100),
+ '1': FakeStat(0, 100),
+ '2': FakeStat(100, 100),
+ }
+ return stats[path[-1]]
+
+ mock_lstat.side_effect = lstat
+
+ result = idmapshift.confirm_dir('/tmp/test', self.uid_maps,
+ self.gid_maps, idmapshift.NOBODY_ID)
+
+ self.assertFalse(result)
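For context, a minimal standalone sketch (outside the patch) of the id-shifting rule the integration tests above exercise. The function name shift_id, the (start, target, count) tuple layout, and the concrete ranges are assumptions inferred from the expected lchown calls, not taken from BaseTestCase or nova.cmd.idmapshift itself:

    NOBODY_ID = 65534  # assumed to match idmapshift.NOBODY_ID


    def shift_id(original, maps, nobody=NOBODY_ID):
        """Return the shifted id, or `nobody` when no map range covers it."""
        for start, target, count in maps:
            if start <= original < start + count:
                return target + (original - start)
        return nobody


    # Hypothetical ranges consistent with the assertions above:
    # 0 -> 10000, 2 -> 10002, 100 -> 20090, 30000 -> NOBODY_ID.
    uid_maps = [(0, 10000, 10), (10, 20000, 1000)]
    assert shift_id(0, uid_maps) == 10000
    assert shift_id(2, uid_maps) == 10002
    assert shift_id(100, uid_maps) == 20090
    assert shift_id(30000, uid_maps) == NOBODY_ID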
diff --git a/nova/tests/unit/compute/__init__.py b/nova/tests/unit/compute/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/compute/__init__.py
diff --git a/nova/tests/unit/compute/eventlet_utils.py b/nova/tests/unit/compute/eventlet_utils.py
new file mode 100644
index 0000000000..6d70c0a063
--- /dev/null
+++ b/nova/tests/unit/compute/eventlet_utils.py
@@ -0,0 +1,23 @@
+# Copyright 2014 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+
+
+class SyncPool(eventlet.GreenPool):
+ """Synchronous pool for testing threaded code without adding sleep
+ waits.
+ """
+ def spawn_n(self, func, *args, **kwargs):
+ func(*args, **kwargs)
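A brief usage sketch, assuming only what SyncPool itself defines: because spawn_n runs the function inline, a test can assert on its side effects immediately, with no sleeps or green-thread yields. The worker/results names are illustrative only; test_compute.py below installs a SyncPool on the compute manager in the same spirit:

    from nova.tests.unit.compute import eventlet_utils

    results = []


    def worker(value):
        results.append(value)


    pool = eventlet_utils.SyncPool()
    pool.spawn_n(worker, 42)   # executes worker(42) before returning
    assert results == [42]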
diff --git a/nova/tests/unit/compute/fake_resource_tracker.py b/nova/tests/unit/compute/fake_resource_tracker.py
new file mode 100644
index 0000000000..b0fec2042b
--- /dev/null
+++ b/nova/tests/unit/compute/fake_resource_tracker.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import resource_tracker
+
+
+class FakeResourceTracker(resource_tracker.ResourceTracker):
+ """Version without a DB requirement."""
+
+ def _create(self, context, values):
+ self._write_ext_resources(values)
+ self.compute_node = values
+ self.compute_node['id'] = 1
+
+ def _update(self, context, values, prune_stats=False):
+ self._write_ext_resources(values)
+ self.compute_node.update(values)
+
+ def _get_service(self, context):
+ return {
+ "id": 1,
+ "compute_node": None
+ }
diff --git a/nova/tests/unit/compute/monitors/__init__.py b/nova/tests/unit/compute/monitors/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/compute/monitors/__init__.py
diff --git a/nova/tests/unit/compute/monitors/test_cpu_monitor.py b/nova/tests/unit/compute/monitors/test_cpu_monitor.py
new file mode 100644
index 0000000000..04977cd47f
--- /dev/null
+++ b/nova/tests/unit/compute/monitors/test_cpu_monitor.py
@@ -0,0 +1,86 @@
+# Copyright 2013 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for Compute Driver CPU resource monitor."""
+
+import fixtures
+
+from nova.compute import manager
+from nova.compute.monitors import virt
+from nova import test
+
+
+class FakeLibvirt(object):
+ def getCPUStats(self, cpuNum, flag):
+ if cpuNum < 2:
+ return {'kernel': 5664160000000L,
+ 'idle': 1592705190000000L,
+ 'user': 26728850000000L,
+ 'iowait': 6121490000000L}
+ else:
+ raise Exception("invalid argument: Invalid cpu number")
+
+ def getInfo(self):
+ return [0, 0, 0, 800, 0, 0, 0, 0]
+
+
+class ComputeDriverCPUMonitorTestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeDriverCPUMonitorTestCase, self).setUp()
+
+ self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._conn',
+ FakeLibvirt()))
+ cm = manager.ComputeManager()
+ self.monitor = virt.ComputeDriverCPUMonitor(cm)
+
+ def test_get_metric_names(self):
+ names = self.monitor.get_metric_names()
+ self.assertEqual(10, len(names))
+ self.assertIn("cpu.frequency", names)
+ self.assertIn("cpu.user.time", names)
+ self.assertIn("cpu.kernel.time", names)
+ self.assertIn("cpu.idle.time", names)
+ self.assertIn("cpu.iowait.time", names)
+ self.assertIn("cpu.user.percent", names)
+ self.assertIn("cpu.kernel.percent", names)
+ self.assertIn("cpu.idle.percent", names)
+ self.assertIn("cpu.iowait.percent", names)
+ self.assertIn("cpu.percent", names)
+
+ def test_get_metrics(self):
+ metrics_raw = self.monitor.get_metrics()
+ names = self.monitor.get_metric_names()
+ metrics = {}
+ for metric in metrics_raw:
+ self.assertIn(metric['name'], names)
+ metrics[metric['name']] = metric['value']
+
+ self.assertEqual(metrics["cpu.frequency"], 800)
+ self.assertEqual(metrics["cpu.user.time"], 26728850000000L)
+ self.assertEqual(metrics["cpu.kernel.time"], 5664160000000L)
+ self.assertEqual(metrics["cpu.idle.time"], 1592705190000000L)
+ self.assertEqual(metrics["cpu.iowait.time"], 6121490000000L)
+ self.assertTrue(metrics["cpu.user.percent"] <= 1
+ and metrics["cpu.user.percent"] >= 0)
+ self.assertTrue(metrics["cpu.kernel.percent"] <= 1
+ and metrics["cpu.kernel.percent"] >= 0)
+ self.assertTrue(metrics["cpu.idle.percent"] <= 1
+ and metrics["cpu.idle.percent"] >= 0)
+ self.assertTrue(metrics["cpu.iowait.percent"] <= 1
+ and metrics["cpu.iowait.percent"] >= 0)
+ self.assertTrue(metrics["cpu.percent"] <= 1
+ and metrics["cpu.percent"] >= 0)
diff --git a/nova/tests/unit/compute/monitors/test_monitors.py b/nova/tests/unit/compute/monitors/test_monitors.py
new file mode 100644
index 0000000000..e846479483
--- /dev/null
+++ b/nova/tests/unit/compute/monitors/test_monitors.py
@@ -0,0 +1,144 @@
+# Copyright 2013 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for resource monitors."""
+
+from nova.compute import monitors
+from nova import test
+
+
+class FakeResourceMonitor(monitors.ResourceMonitorBase):
+ def _update_data(self):
+ self._data['foo.metric1'] = '1000'
+ self._data['foo.metric2'] = '99.999'
+ self._data['timestamp'] = '123'
+
+ @monitors.ResourceMonitorBase.add_timestamp
+ def _get_foo_metric1(self, **kwargs):
+ return self._data.get("foo.metric1")
+
+ @monitors.ResourceMonitorBase.add_timestamp
+ def _get_foo_metric2(self, **kwargs):
+ return self._data.get("foo.metric2")
+
+
+class FakeMonitorClass1(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 1232,
+ 'name': 'key1',
+ 'value': 2600,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key1']
+
+
+class FakeMonitorClass2(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 123,
+ 'name': 'key2',
+ 'value': 1600,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key2']
+
+
+class FakeMonitorClass3(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 1234,
+ 'name': 'key1',
+ 'value': 1200,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key1']
+
+
+class FakeMonitorClass4(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ raise test.TestingException()
+
+ def get_metric_names(self):
+ raise test.TestingException()
+
+
+class ResourceMonitorBaseTestCase(test.TestCase):
+ def setUp(self):
+ super(ResourceMonitorBaseTestCase, self).setUp()
+ self.monitor = FakeResourceMonitor(None)
+
+ def test_get_metric_names(self):
+ names = self.monitor.get_metric_names()
+ self.assertEqual(2, len(names))
+ self.assertIn("foo.metric1", names)
+ self.assertIn("foo.metric2", names)
+
+ def test_get_metrics(self):
+ metrics_raw = self.monitor.get_metrics()
+ names = self.monitor.get_metric_names()
+ metrics = {}
+ for metric in metrics_raw:
+ self.assertIn(metric['name'], names)
+ self.assertEqual(metric["timestamp"], '123')
+ metrics[metric['name']] = metric['value']
+
+ self.assertEqual(metrics["foo.metric1"], '1000')
+ self.assertEqual(metrics["foo.metric2"], '99.999')
+
+
+class ResourceMonitorsTestCase(test.TestCase):
+ """Test case for monitors."""
+
+ def setUp(self):
+ super(ResourceMonitorsTestCase, self).setUp()
+ self.monitor_handler = monitors.ResourceMonitorHandler()
+ fake_monitors = [
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
+ self.flags(compute_available_monitors=fake_monitors)
+
+ classes = self.monitor_handler.get_matching_classes(
+ ['nova.compute.monitors.all_monitors'])
+ self.class_map = {}
+ for cls in classes:
+ self.class_map[cls.__name__] = cls
+
+ def test_choose_monitors_not_found(self):
+ self.flags(compute_monitors=['FakeMonitorClass5', 'FakeMonitorClass4'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 0)
+
+ def test_choose_monitors_bad(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakePluginClass3'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 1)
+
+ def test_choose_monitors(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 2)
+
+ def test_choose_monitors_none(self):
+ self.flags(compute_monitors=[])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 0)
+
+ def test_all_monitors(self):
+ # Double check at least a couple of known monitors exist
+ self.assertIn('ComputeDriverCPUMonitor', self.class_map)
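The selection behaviour pinned down by ResourceMonitorsTestCase (unknown names are skipped, an empty request selects nothing) boils down to a name filter. A minimal sketch of that filtering step follows; the real ResourceMonitorHandler resolves the classes through nova's loadables machinery, which is not reproduced here:

    def choose_monitor_classes(requested_names, class_map):
        """Return the available classes whose names were requested."""
        return [class_map[name] for name in requested_names
                if name in class_map]


    available = {'FakeMonitorClass1': object, 'FakeMonitorClass2': object}
    assert len(choose_monitor_classes(
        ['FakeMonitorClass1', 'FakePluginClass3'], available)) == 1
    assert choose_monitor_classes([], available) == []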
diff --git a/nova/tests/unit/compute/test_arch.py b/nova/tests/unit/compute/test_arch.py
new file mode 100644
index 0000000000..0aab95c2ae
--- /dev/null
+++ b/nova/tests/unit/compute/test_arch.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+
+from nova.compute import arch
+from nova import exception
+from nova import test
+
+
+class ArchTest(test.NoDBTestCase):
+
+ @mock.patch.object(os, "uname")
+ def test_host(self, mock_uname):
+ os.uname.return_value = (
+ 'Linux',
+ 'localhost.localdomain',
+ '3.14.8-200.fc20.x86_64',
+ '#1 SMP Mon Jun 16 21:57:53 UTC 2014',
+ 'i686'
+ )
+
+ self.assertEqual(arch.I686, arch.from_host())
+
+ def test_valid_string(self):
+ self.assertTrue(arch.is_valid("x86_64"))
+
+ def test_valid_constant(self):
+ self.assertTrue(arch.is_valid(arch.X86_64))
+
+ def test_valid_bogus(self):
+ self.assertFalse(arch.is_valid("x86_64wibble"))
+
+ def test_canonicalize_i386(self):
+ self.assertEqual(arch.I686, arch.canonicalize("i386"))
+
+ def test_canonicalize_amd64(self):
+ self.assertEqual(arch.X86_64, arch.canonicalize("amd64"))
+
+ def test_canonicalize_case(self):
+ self.assertEqual(arch.X86_64, arch.canonicalize("X86_64"))
+
+ def test_canonicalize_compat_xen1(self):
+ self.assertEqual(arch.I686, arch.canonicalize("x86_32"))
+
+ def test_canonicalize_compat_xen2(self):
+ self.assertEqual(arch.I686, arch.canonicalize("x86_32p"))
+
+ def test_canonicalize_bogus(self):
+ self.assertRaises(exception.InvalidArchitectureName,
+ arch.canonicalize,
+ "x86_64wibble")
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
new file mode 100644
index 0000000000..50218f24c7
--- /dev/null
+++ b/nova/tests/unit/compute/test_claims.py
@@ -0,0 +1,320 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for resource tracker claims."""
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.compute import claims
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit.pci import fakes as pci_fakes
+from nova.virt import hardware
+
+
+class FakeResourceHandler(object):
+ test_called = False
+    usage_is_itype = False
+
+ def test_resources(self, usage, limits):
+ self.test_called = True
+        self.usage_is_itype = usage.get('name') == 'fakeitype'
+ return []
+
+
+class DummyTracker(object):
+ icalled = False
+ rcalled = False
+ pci_tracker = pci_manager.PciDevTracker()
+ ext_resources_handler = FakeResourceHandler()
+
+ def abort_instance_claim(self, *args, **kwargs):
+ self.icalled = True
+
+ def drop_resize_claim(self, *args, **kwargs):
+ self.rcalled = True
+
+ def new_pci_tracker(self):
+ self.pci_tracker = pci_manager.PciDevTracker()
+
+
+@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+class ClaimTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ClaimTestCase, self).setUp()
+ self.resources = self._fake_resources()
+ self.tracker = DummyTracker()
+
+ def _claim(self, limits=None, overhead=None, **kwargs):
+ numa_topology = kwargs.pop('numa_topology', None)
+ instance = self._fake_instance(**kwargs)
+ if numa_topology:
+ db_numa_topology = {
+ 'id': 1, 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': None,
+ 'instance_uuid': instance['uuid'],
+ 'numa_topology': numa_topology.to_json()
+ }
+ else:
+ db_numa_topology = None
+ if overhead is None:
+ overhead = {'memory_mb': 0}
+ with mock.patch.object(
+ db, 'instance_extra_get_by_instance_uuid',
+ return_value=db_numa_topology):
+ return claims.Claim('context', instance, self.tracker,
+ self.resources, overhead=overhead,
+ limits=limits)
+
+ def _fake_instance(self, **kwargs):
+ instance = {
+ 'uuid': str(uuid.uuid1()),
+ 'memory_mb': 1024,
+ 'root_gb': 10,
+ 'ephemeral_gb': 5,
+ 'vcpus': 1,
+ 'system_metadata': {},
+ 'numa_topology': None
+ }
+ instance.update(**kwargs)
+ return instance
+
+ def _fake_instance_type(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': 1,
+ 'vcpus': 1,
+ 'root_gb': 1,
+ 'ephemeral_gb': 2
+ }
+ instance_type.update(**kwargs)
+ return instance_type
+
+ def _fake_resources(self, values=None):
+ resources = {
+ 'memory_mb': 2048,
+ 'memory_mb_used': 0,
+ 'free_ram_mb': 2048,
+ 'local_gb': 20,
+ 'local_gb_used': 0,
+ 'free_disk_gb': 20,
+ 'vcpus': 2,
+ 'vcpus_used': 0,
+ 'numa_topology': hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(1, [1, 2], 512),
+ hardware.VirtNUMATopologyCellUsage(2, [3, 4], 512)]
+ ).to_json()
+ }
+ if values:
+ resources.update(values)
+ return resources
+
+ def test_memory_unlimited(self, mock_get):
+ self._claim(memory_mb=99999999)
+
+ def test_disk_unlimited_root(self, mock_get):
+ self._claim(root_gb=999999)
+
+ def test_disk_unlimited_ephemeral(self, mock_get):
+ self._claim(ephemeral_gb=999999)
+
+ def test_memory_with_overhead(self, mock_get):
+ overhead = {'memory_mb': 8}
+ limits = {'memory_mb': 2048}
+ self._claim(memory_mb=2040, limits=limits,
+ overhead=overhead)
+
+ def test_memory_with_overhead_insufficient(self, mock_get):
+ overhead = {'memory_mb': 9}
+ limits = {'memory_mb': 2048}
+
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim, limits=limits, overhead=overhead,
+ memory_mb=2040)
+
+ def test_memory_oversubscription(self, mock_get):
+ self._claim(memory_mb=4096)
+
+ def test_memory_insufficient(self, mock_get):
+ limits = {'memory_mb': 8192}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim, limits=limits, memory_mb=16384)
+
+ def test_disk_oversubscription(self, mock_get):
+ limits = {'disk_gb': 60}
+ self._claim(root_gb=10, ephemeral_gb=40,
+ limits=limits)
+
+ def test_disk_insufficient(self, mock_get):
+ limits = {'disk_gb': 45}
+ self.assertRaisesRegexp(
+ exception.ComputeResourcesUnavailable,
+ "disk",
+ self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
+
+ def test_disk_and_memory_insufficient(self, mock_get):
+ limits = {'disk_gb': 45, 'memory_mb': 8192}
+ self.assertRaisesRegexp(
+ exception.ComputeResourcesUnavailable,
+ "memory.*disk",
+ self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
+ memory_mb=16384)
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_pass(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ mock_get.return_value = objects.InstancePCIRequests(
+ requests=[request])
+ self.assertIsNone(claim._test_pci())
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_fail(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v1',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ mock_get.return_value = objects.InstancePCIRequests(
+ requests=[request])
+ claim._test_pci()
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_pass_no_requests(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ self.assertIsNone(claim._test_pci())
+
+ def test_ext_resources(self, mock_get):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
+
+ def test_numa_topology_no_limit(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ self._claim(numa_topology=huge_instance)
+
+ def test_numa_topology_fails(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ limit_topo = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 1, [1, 2], 512, cpu_limit=2, memory_limit=512),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, [3, 4], 512, cpu_limit=2, memory_limit=512)])
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim,
+ limits={'numa_topology': limit_topo.to_json()},
+ numa_topology=huge_instance)
+
+ def test_numa_topology_passes(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ limit_topo = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 1, [1, 2], 512, cpu_limit=5, memory_limit=4096),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, [3, 4], 512, cpu_limit=5, memory_limit=4096)])
+ self._claim(limits={'numa_topology': limit_topo.to_json()},
+ numa_topology=huge_instance)
+
+ def test_abort(self, mock_get):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.icalled)
+
+ def _abort(self):
+ claim = None
+ try:
+ with self._claim(memory_mb=4096) as claim:
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ return claim
+
+
+class ResizeClaimTestCase(ClaimTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+ self.instance = self._fake_instance()
+ self.get_numa_constraint_patch = None
+
+ def _claim(self, limits=None, overhead=None, **kwargs):
+ instance_type = self._fake_instance_type(**kwargs)
+ numa_constraint = kwargs.pop('numa_topology', None)
+ if overhead is None:
+ overhead = {'memory_mb': 0}
+ with mock.patch.object(
+ hardware.VirtNUMAInstanceTopology, 'get_constraints',
+ return_value=numa_constraint):
+ return claims.ResizeClaim('context', self.instance, instance_type,
+ {}, self.tracker, self.resources,
+ overhead=overhead, limits=limits)
+
+ def _set_pci_request(self, claim):
+ request = [{'count': 1,
+ 'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
+ }]
+ claim.instance.update(
+ system_metadata={'new_pci_requests': jsonutils.dumps(request)})
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_ext_resources(self, mock_get):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_abort(self, mock_get):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.rcalled)
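Why test_abort works: a claim is a context manager that triggers the tracker's abort path when its with-block exits via an exception. The toy classes below only sketch that contract; they are not the claims.Claim implementation:

    class SketchTracker(object):
        icalled = False

        def abort_instance_claim(self, *args, **kwargs):
            self.icalled = True


    class SketchClaim(object):
        def __init__(self, tracker):
            self.tracker = tracker

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                self.tracker.abort_instance_claim()
            return False   # let the original exception propagate


    tracker = SketchTracker()
    try:
        with SketchClaim(tracker):
            raise RuntimeError('abort')
    except RuntimeError:
        pass
    assert tracker.icalled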
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
new file mode 100644
index 0000000000..8f4d73dd79
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute.py
@@ -0,0 +1,11415 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service."""
+
+import base64
+import contextlib
+import datetime
+import operator
+import sys
+import time
+import traceback
+import uuid
+
+from eventlet import greenthread
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+import testtools
+from testtools import matchers as testtools_matchers
+
+import nova
+from nova import availability_zones
+from nova import block_device
+from nova import compute
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import manager as conductor_manager
+from nova.console import type as ctype
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import api as network_api
+from nova.network import model as network_model
+from nova.network.security_group import openstack_driver
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import block_device as block_device_obj
+from nova.objects import instance as instance_obj
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova import policy
+from nova import quota
+from nova import test
+from nova.tests.unit.compute import eventlet_utils
+from nova.tests.unit.compute import fake_resource_tracker
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_migration
+from nova import utils
+from nova.virt import block_device as driver_block_device
+from nova.virt import event
+from nova.virt import fake
+from nova.virt import hardware
+from nova.volume import cinder
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
+CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+NODENAME = 'fakenode1'
+
+
+def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
+
+
+def get_primitive_instance_by_uuid(context, instance_uuid):
+ """Helper method to get an instance and then convert it to
+ a primitive form using jsonutils.
+ """
+ instance = db.instance_get_by_uuid(context, instance_uuid)
+ return jsonutils.to_primitive(instance)
+
+
+def unify_instance(instance):
+ """Return a dict-like instance for both object-initiated and
+ model-initiated sources that can reasonably be compared.
+ """
+ newdict = dict()
+ for k, v in instance.iteritems():
+ if isinstance(v, datetime.datetime):
+ # NOTE(danms): DB models and Instance objects have different
+ # timezone expectations
+ v = v.replace(tzinfo=None)
+ elif k == 'fault':
+ # NOTE(danms): DB models don't have 'fault'
+ continue
+ elif k == 'pci_devices':
+ # NOTE(yonlig.he) pci devices need lazy loading
+ # fake db does not support it yet.
+ continue
+ newdict[k] = v
+ return newdict
+
+
+class FakeSchedulerAPI(object):
+
+ def run_instance(self, ctxt, request_spec, admin_password,
+ injected_files, requested_networks, is_first_time,
+ filter_properties):
+ pass
+
+ def live_migration(self, ctxt, block_migration, disk_over_commit,
+ instance, dest):
+ pass
+
+ def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
+ filter_properties, reservations):
+ pass
+
+
+class FakeComputeTaskAPI(object):
+
+ def resize_instance(self, context, instance, extra_instance_updates,
+ scheduler_hint, flavor, reservations):
+ pass
+
+
+class BaseTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self.flags(network_manager='nova.network.manager.FlatManager')
+ fake.set_nodes([NODENAME])
+ self.flags(use_local=True, group='conductor')
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ self.compute = importutils.import_object(CONF.compute_manager)
+ # execute power syncing synchronously for testing:
+ self.compute._sync_power_pool = eventlet_utils.SyncPool()
+
+ # override tracker with a version that doesn't need the database:
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
+
+ def fake_get_compute_nodes_in_db(context, use_slave=False):
+ fake_compute_nodes = [{'local_gb': 259,
+ 'vcpus_used': 0,
+ 'deleted': 0,
+ 'hypervisor_type': 'powervm',
+ 'created_at': '2013-04-01T00:27:06.000000',
+ 'local_gb_used': 0,
+ 'updated_at': '2013-04-03T00:35:41.000000',
+ 'hypervisor_hostname': 'fake_phyp1',
+ 'memory_mb_used': 512,
+ 'memory_mb': 131072,
+ 'current_workload': 0,
+ 'vcpus': 16,
+ 'cpu_info': 'ppc64,powervm,3940',
+ 'running_vms': 0,
+ 'free_disk_gb': 259,
+ 'service_id': 7,
+ 'hypervisor_version': 7,
+ 'disk_available_least': 265856,
+ 'deleted_at': None,
+ 'free_ram_mb': 130560,
+ 'metrics': '',
+ 'stats': '',
+ 'numa_topology': '',
+ 'id': 2,
+ 'host_ip': '127.0.0.1'}]
+ return [objects.ComputeNode._from_db_object(
+ context, objects.ComputeNode(), cn)
+ for cn in fake_compute_nodes]
+
+ def fake_compute_node_delete(context, compute_node_id):
+ self.assertEqual(2, compute_node_id)
+
+ self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
+ fake_get_compute_nodes_in_db)
+ self.stubs.Set(db, 'compute_node_delete',
+ fake_compute_node_delete)
+
+ self.compute.update_available_resource(
+ context.get_admin_context())
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+ self.none_quotas = objects.Quotas.from_reservations(
+ self.context, None)
+
+ def fake_show(meh, context, id, **kwargs):
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ fake_rpcapi = FakeSchedulerAPI()
+ fake_taskapi = FakeComputeTaskAPI()
+ self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
+ self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
+
+ fake_network.set_stub_network_methods(self.stubs)
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+
+ def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
+ self.assertFalse(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ self.stubs.Set(network_api.API, 'allocate_for_instance',
+ fake_allocate_for_instance)
+ self.compute_api = compute.API()
+
+ # Just to make long lines short
+ self.rt = self.compute._get_resource_tracker(NODENAME)
+
+ def tearDown(self):
+ timeutils.clear_time_override()
+ ctxt = context.get_admin_context()
+ fake_image.FakeImageService_reset()
+ instances = db.instance_get_all(ctxt)
+ for instance in instances:
+ db.instance_destroy(ctxt, instance['uuid'])
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny',
+ services=False):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ def make_fake_sys_meta():
+ sys_meta = params.pop("system_metadata", {})
+ inst_type = flavors.get_flavor_by_name(type_name)
+ for key in flavors.system_metadata_flavor_props:
+ sys_meta['instance_type_%s' % key] = inst_type[key]
+ return sys_meta
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['task_state'] = None
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
+ type_id = flavors.get_flavor_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = arch.X86_64
+ inst['os_type'] = 'Linux'
+ inst['system_metadata'] = make_fake_sys_meta()
+ inst['locked'] = False
+ inst['created_at'] = timeutils.utcnow()
+ inst['updated_at'] = timeutils.utcnow()
+ inst['launched_at'] = timeutils.utcnow()
+ inst['security_groups'] = []
+ inst.update(params)
+ if services:
+ _create_service_entries(self.context.elevated(),
+ [['fake_zone', [inst['host']]]])
+ return db.instance_create(self.context, inst)
+
+ def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
+ services=False):
+ db_inst = self._create_fake_instance(params, type_name=type_name,
+ services=services)
+ return objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_inst,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+
+ def _create_instance_type(self, params=None):
+ """Create a test instance type."""
+ if not params:
+ params = {}
+
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = 1024
+ inst['vcpus'] = 1
+ inst['root_gb'] = 20
+ inst['ephemeral_gb'] = 10
+ inst['flavorid'] = '1'
+ inst['swap'] = 2048
+ inst['rxtx_factor'] = 1
+ inst.update(params)
+ return db.flavor_create(context, inst)['id']
+
+ def _create_group(self):
+ values = {'name': 'testgroup',
+ 'description': 'testgroup',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+ return db.security_group_create(self.context, values)
+
+ def _stub_migrate_server(self):
+ def _fake_migrate_server(*args, **kwargs):
+ pass
+
+ self.stubs.Set(conductor_manager.ComputeTaskManager,
+ 'migrate_server', _fake_migrate_server)
+
+ def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
+ if not aggr:
+ aggr = self.api.create_aggregate(self.context, aggr_name, zone)
+ aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
+ return aggr
+
+
+class ComputeVolumeTestCase(BaseTestCase):
+
+ def setUp(self):
+ super(ComputeVolumeTestCase, self).setUp()
+ self.volume_id = 'fake'
+ self.fetched_attempts = 0
+ self.instance = {
+ 'id': 'fake',
+ 'uuid': 'fake',
+ 'name': 'fake',
+ 'root_device_name': '/dev/vda',
+ }
+ self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
+ self.instance_object = objects.Instance._from_db_object(
+ self.context, objects.Instance(),
+ fake_instance.fake_db_instance())
+ self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
+ {'id': self.volume_id,
+ 'attach_status': 'detached'})
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ lambda *a, **kw: {})
+ self.stubs.Set(self.compute.volume_api, 'terminate_connection',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'attach',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'detach',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'check_attach',
+ lambda *a, **kw: None)
+ self.stubs.Set(greenthread, 'sleep',
+ lambda *a, **kw: None)
+
+ def store_cinfo(context, *args, **kwargs):
+ self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
+ return self.fake_volume
+
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update',
+ store_cinfo)
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update_or_create',
+ store_cinfo)
+ self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
+ self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
+
+ def test_attach_volume_serial(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
+ return_value={})):
+ instance = self._create_fake_instance_obj()
+ self.compute.attach_volume(self.context, self.volume_id,
+ '/dev/vdb', instance, bdm=fake_bdm)
+ self.assertEqual(self.cinfo.get('serial'), self.volume_id)
+
+ def test_attach_volume_raises(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance_obj()
+
+ def fake_attach(*args, **kwargs):
+ raise test.TestingException
+
+ with contextlib.nested(
+ mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
+ 'attach'),
+ mock.patch.object(cinder.API, 'unreserve_volume'),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'destroy')
+ ) as (mock_attach, mock_unreserve, mock_destroy):
+ mock_attach.side_effect = fake_attach
+ self.assertRaises(
+ test.TestingException, self.compute.attach_volume,
+ self.context, 'fake', '/dev/vdb',
+ instance, bdm=fake_bdm)
+ self.assertTrue(mock_unreserve.called)
+ self.assertTrue(mock_destroy.called)
+
+ def test_detach_volume_api_raises(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance()
+
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_detach_volume'),
+ mock.patch.object(self.compute.volume_api, 'detach'),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id'),
+ mock.patch.object(fake_bdm, 'destroy')
+ ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
+ mock_detach.side_effect = test.TestingException
+ mock_get.return_value = fake_bdm
+ self.assertRaises(
+ test.TestingException, self.compute.detach_volume,
+ self.context, 'fake', instance)
+ mock_internal_detach.assert_called_once_with(self.context,
+ instance,
+ fake_bdm)
+ self.assertTrue(mock_destroy.called)
+
+ def test_attach_volume_no_bdm(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance_obj()
+
+ with contextlib.nested(
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id', return_value=fake_bdm),
+ mock.patch.object(self.compute, '_attach_volume')
+ ) as (mock_get_by_id, mock_attach):
+ self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
+ instance, bdm=None)
+ mock_get_by_id.assert_called_once_with(self.context, 'fake')
+ self.assertTrue(mock_attach.called)
+
+ def test_await_block_device_created_too_slow(self):
+ self.flags(block_device_allocate_retries=2)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def never_get(context, vol_id):
+ return {
+ 'status': 'creating',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(self.compute.volume_api, 'get', never_get)
+ self.assertRaises(exception.VolumeNotCreated,
+ self.compute._await_block_device_map_created,
+ self.context, '1')
+
+ def test_await_block_device_created_slow(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=4)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def slow_get(context, vol_id):
+ if self.fetched_attempts < 2:
+ self.fetched_attempts += 1
+ return {
+ 'status': 'creating',
+ 'id': 'blah',
+ }
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', slow_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(attempts, 3)
+
+ def test_await_block_device_created_retries_negative(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=-1)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def volume_get(context, vol_id):
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', volume_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(1, attempts)
+
+ def test_await_block_device_created_retries_zero(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=0)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def volume_get(context, vol_id):
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', volume_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(1, attempts)
+
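The four tests above fix the retry semantics of _await_block_device_map_created: non-positive retry counts still make one attempt, and exhausting the retries raises. A simplified stand-in showing just that control flow (the function name, signature and generic exception are placeholders, not Nova's API):

    import time


    def await_volume_available(volume_get, context, vol_id, retries,
                               interval):
        """Poll until the volume leaves 'creating'; return attempt count."""
        attempts = max(1, retries)
        for attempt in range(1, attempts + 1):
            if volume_get(context, vol_id)['status'] != 'creating':
                return attempt
            time.sleep(interval)
        raise RuntimeError('volume %s was not created in time' % vol_id)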
+ def test_boot_volume_serial(self):
+ with (
+ mock.patch.object(objects.BlockDeviceMapping, 'save')
+ ) as mock_save:
+ block_device_mapping = [
+ block_device.BlockDeviceDict({
+ 'id': 1,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': '/dev/vdb',
+ 'delete_on_termination': False,
+ })]
+ prepped_bdm = self.compute._prep_block_device(
+ self.context, self.instance, block_device_mapping)
+ mock_save.assert_called_once_with(self.context)
+ volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
+ self.assertEqual(volume_driver_bdm['connection_info']['serial'],
+ self.volume_id)
+
+ def test_boot_volume_metadata(self, metadata=True):
+ def volume_api_get(*args, **kwargs):
+ if metadata:
+ return {
+ 'size': 1,
+ 'volume_image_metadata': {'vol_test_key': 'vol_test_value',
+ 'min_ram': u'128',
+ 'min_disk': u'256',
+ 'size': u'536870912'
+ },
+ }
+ else:
+ return {}
+
+ self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
+
+ expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
+ 'size': 0, 'status': 'active'}
+
+ block_device_mapping = [{
+ 'id': 1,
+ 'device_name': 'vda',
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'delete_on_termination': False,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping)
+ if metadata:
+ self.assertEqual(image_meta['properties']['vol_test_key'],
+ 'vol_test_value')
+ self.assertEqual(128, image_meta['min_ram'])
+ self.assertEqual(256, image_meta['min_disk'])
+ self.assertEqual(units.Gi, image_meta['size'])
+ else:
+ self.assertEqual(expected_no_metadata, image_meta)
+
+ # Test it with new-style BDMs
+ block_device_mapping = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': self.volume_id,
+ 'delete_on_termination': False,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping, legacy_bdm=False)
+ if metadata:
+ self.assertEqual(image_meta['properties']['vol_test_key'],
+ 'vol_test_value')
+ self.assertEqual(128, image_meta['min_ram'])
+ self.assertEqual(256, image_meta['min_disk'])
+ self.assertEqual(units.Gi, image_meta['size'])
+ else:
+ self.assertEqual(expected_no_metadata, image_meta)
+
+ def test_boot_volume_no_metadata(self):
+ self.test_boot_volume_metadata(metadata=False)
+
+ def test_boot_image_metadata(self, metadata=True):
+ def image_api_get(*args, **kwargs):
+ if metadata:
+ return {
+ 'properties': {'img_test_key': 'img_test_value'}
+ }
+ else:
+ return {}
+
+ self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
+
+ block_device_mapping = [{
+ 'boot_index': 0,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': "fake-image",
+ 'delete_on_termination': True,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping, legacy_bdm=False)
+
+ if metadata:
+ self.assertEqual('img_test_value',
+ image_meta['properties']['img_test_key'])
+ else:
+ self.assertEqual(image_meta, {})
+
+ def test_boot_image_no_metadata(self):
+ self.test_boot_image_metadata(metadata=False)
+
+ def test_poll_bandwidth_usage_not_implemented(self):
+ ctxt = context.get_admin_context()
+
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(time, 'time')
+ self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
+ # Following methods will be called
+ utils.last_completed_audit_period().AndReturn((0, 0))
+ time.time().AndReturn(10)
+ # Note - time called two more times from Log
+ time.time().AndReturn(20)
+ time.time().AndReturn(21)
+ objects.InstanceList.get_by_host(ctxt, 'fake-mini',
+ use_slave=True).AndReturn([])
+ self.compute.driver.get_all_bw_counters([]).AndRaise(
+ NotImplementedError)
+ self.mox.ReplayAll()
+
+ self.flags(bandwidth_poll_interval=1)
+ self.compute._poll_bandwidth_usage(ctxt)
+ # A second call won't call the stubs again as the bandwidth
+ # poll is now disabled
+ self.compute._poll_bandwidth_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ @mock.patch.object(objects.InstanceList, 'get_by_host')
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
+ fake_instance = mock.Mock(uuid='fake-instance-uuid')
+ mock_get_by_host.return_value = [fake_instance]
+
+ volume_bdm = mock.Mock(id=1, is_volume=True)
+ not_volume_bdm = mock.Mock(id=2, is_volume=False)
+ mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
+
+ expected_host_bdms = [{'instance': fake_instance,
+ 'instance_bdms': [volume_bdm]}]
+
+ got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
+ mock_get_by_host.assert_called_once_with('fake-context',
+ self.compute.host)
+ mock_get_by_inst.assert_called_once_with('fake-context',
+ 'fake-instance-uuid',
+ use_slave=False)
+ self.assertEqual(expected_host_bdms, got_host_bdms)
+
+ def test_poll_volume_usage_disabled(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ # None of the mocks should be called.
+ self.mox.ReplayAll()
+
+ self.flags(volume_usage_poll_interval=0)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_poll_volume_usage_returns_no_vols(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
+ # Following methods are called.
+ utils.last_completed_audit_period().AndReturn((0, 0))
+ self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_poll_volume_usage_with_data(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
+ self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
+ lambda x, y: [3, 4])
+ # All the mocks are called
+ utils.last_completed_audit_period().AndReturn((10, 20))
+ self.compute._get_host_volume_bdms(ctxt,
+ use_slave=True).AndReturn([1, 2])
+ self.compute._update_volume_usage_cache(ctxt, [3, 4])
+ self.mox.ReplayAll()
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_detach_volume_usage(self):
+ # Test that detach volume update the volume usage cache table correctly
+ instance = self._create_fake_instance_obj()
+ bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'device_name': '/dev/vdb',
+ 'connection_info': '{}', 'instance_uuid': instance['uuid'],
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 1})
+ host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
+ 'connection_info': '{}', 'instance_uuid': instance['uuid'],
+ 'volume_id': 1}
+
+ self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
+ self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
+
+ # The following methods will be called
+ db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
+ AndReturn(bdm)
+ self.compute.driver.block_stats(instance['name'], 'vdb').\
+ AndReturn([1L, 30L, 1L, 20L, None])
+ self.compute._get_host_volume_bdms(self.context,
+ use_slave=True).AndReturn(
+ host_volume_bdms)
+ self.compute.driver.get_all_volume_usage(
+ self.context, host_volume_bdms).AndReturn(
+ [{'volume': 1,
+ 'rd_req': 1,
+ 'rd_bytes': 10,
+ 'wr_req': 1,
+ 'wr_bytes': 5,
+ 'instance': instance}])
+ db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
+ AndReturn(bdm)
+
+ self.mox.ReplayAll()
+
+ def fake_get_volume_encryption_metadata(self, context, volume_id):
+ return {}
+ self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
+ fake_get_volume_encryption_metadata)
+
+ self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
+
+ # Poll volume usage & then detach the volume. This will update the
+ # total fields in the volume usage cache.
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(self.context)
+ # Check that a volume.usage and volume.attach notification was sent
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+
+ self.compute.detach_volume(self.context, 1, instance)
+
+ # Check that volume.attach, 2 volume.usage, and volume.detach
+ # notifications were sent
+ self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.instance.volume.attach', msg.event_type)
+ msg = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual('volume.usage', msg.event_type)
+ payload = msg.payload
+ self.assertEqual(instance['uuid'], payload['instance_id'])
+ self.assertEqual('fake', payload['user_id'])
+ self.assertEqual('fake', payload['tenant_id'])
+ self.assertEqual(1, payload['reads'])
+ self.assertEqual(30, payload['read_bytes'])
+ self.assertEqual(1, payload['writes'])
+ self.assertEqual(20, payload['write_bytes'])
+ self.assertIsNone(payload['availability_zone'])
+ msg = fake_notifier.NOTIFICATIONS[3]
+ self.assertEqual('compute.instance.volume.detach', msg.event_type)
+
+ # Check the database for the volume usage record.
+ volume_usages = db.vol_get_usage_by_time(self.context, 0)
+ self.assertEqual(1, len(volume_usages))
+ volume_usage = volume_usages[0]
+ self.assertEqual(0, volume_usage['curr_reads'])
+ self.assertEqual(0, volume_usage['curr_read_bytes'])
+ self.assertEqual(0, volume_usage['curr_writes'])
+ self.assertEqual(0, volume_usage['curr_write_bytes'])
+ self.assertEqual(1, volume_usage['tot_reads'])
+ self.assertEqual(30, volume_usage['tot_read_bytes'])
+ self.assertEqual(1, volume_usage['tot_writes'])
+ self.assertEqual(20, volume_usage['tot_write_bytes'])
+
+ def test_prepare_image_mapping(self):
+ swap_size = 1
+ ephemeral_size = 1
+ instance_type = {'swap': swap_size,
+ 'ephemeral_gb': ephemeral_size}
+ mappings = [
+ {'virtual': 'ami', 'device': 'sda1'},
+ {'virtual': 'root', 'device': '/dev/sda1'},
+
+ {'virtual': 'swap', 'device': 'sdb4'},
+
+ {'virtual': 'ephemeral0', 'device': 'sdc1'},
+ {'virtual': 'ephemeral1', 'device': 'sdc2'},
+ ]
+
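+ # _prepare_image_mapping should turn the image-style (virtual/device)
+ # mappings above into API block device dicts, with the swap and
+ # ephemeral sizes taken from the instance type.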
+ preped_bdm = self.compute_api._prepare_image_mapping(
+ instance_type, mappings)
+
+ expected_result = [
+ {
+ 'device_name': '/dev/sdb4',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': swap_size
+ },
+ {
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'boot_index': -1,
+ 'volume_size': ephemeral_size
+ },
+ {
+ 'device_name': '/dev/sdc2',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'boot_index': -1,
+ 'volume_size': ephemeral_size
+ }
+ ]
+
+ for expected, got in zip(expected_result, preped_bdm):
+ self.assertThat(expected, matchers.IsSubDictOf(got))
+
+ def test_validate_bdm(self):
+ def fake_get(self, context, res_id):
+ return {'id': res_id}
+
+ def fake_check_attach(*args, **kwargs):
+ pass
+
+ self.stubs.Set(cinder.API, 'get', fake_get)
+ self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
+ self.stubs.Set(cinder.API, 'check_attach',
+ fake_check_attach)
+
+ volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
+ snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
+ image_id = '77777777-aaaa-bbbb-cccc-555555555555'
+
+ instance = self._create_fake_instance()
+ instance_type = {'swap': 1, 'ephemeral_gb': 2}
+ mappings = [
+ {
+ 'device_name': '/dev/sdb4',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': 1
+ },
+ {
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': 1,
+ 'volume_size': 6
+ },
+ {
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'snapshot_id': snapshot_id,
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': 0,
+ 'volume_size': 4
+ },
+ {
+ 'device_name': '/dev/sda3',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': 2,
+ 'volume_size': 1
+ }
+ ]
+
+ # Make sure it passes at first
+ self.compute_api._validate_bdm(self.context, instance,
+ instance_type, mappings)
+
+ # Boot sequence
+ mappings[2]['boot_index'] = 2
+ self.assertRaises(exception.InvalidBDMBootSequence,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
+ mappings[2]['boot_index'] = 0
+
+ # number of local block_devices
+ self.flags(max_local_block_devices=1)
+ self.assertRaises(exception.InvalidBDMLocalsLimit,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
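+ # Two blank local (ephemeral) devices of 1 GB each; together they fit
+ # within the flavor's 2 GB of ephemeral space.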
+ ephemerals = [
+ {
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': -1,
+ 'volume_size': 1
+ },
+ {
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': -1,
+ 'volume_size': 1
+ }]
+
+ self.flags(max_local_block_devices=4)
+ # More ephemerals are OK as long as they are not over the size limit
+ self.compute_api._validate_bdm(self.context, instance,
+ instance_type, mappings + ephemerals)
+
+ # Ephemerals over the size limit
+ ephemerals[0]['volume_size'] = 3
+ self.assertRaises(exception.InvalidBDMEphemeralSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + ephemerals)
+ self.assertRaises(exception.InvalidBDMEphemeralSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + [ephemerals[0]])
+
+ # Swap over the size limit
+ mappings[0]['volume_size'] = 3
+ self.assertRaises(exception.InvalidBDMSwapSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
+ mappings[0]['volume_size'] = 1
+
+ additional_swap = [
+ {
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': 1
+ }]
+
+ # More than one swap
+ self.assertRaises(exception.InvalidBDMFormat,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + additional_swap)
+
+ image_no_size = [
+ {
+ 'device_name': '/dev/sda4',
+ 'source_type': 'image',
+ 'image_id': image_id,
+ 'destination_type': 'volume',
+ 'boot_index': -1,
+ 'volume_size': None,
+ }]
+ self.assertRaises(exception.InvalidBDM,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + image_no_size)
+
+ def test_validate_bdm_media_service_exceptions(self):
+ instance_type = {'swap': 1, 'ephemeral_gb': 1}
+ all_mappings = [{'id': 1,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ 'delete_on_termination': False}]
+
+ # First we test a list of invalid status values that should result
+ # in an InvalidVolume exception being raised.
+ status_values = (
+ # First two check that the status is 'available'.
+ ('creating', 'detached'),
+ ('error', 'detached'),
+ # Checks that the attach_status is 'detached'.
+ ('available', 'attached')
+ )
+
+ for status, attach_status in status_values:
+ def fake_volume_get(self, ctxt, volume_id):
+ return {'id': volume_id,
+ 'status': status,
+ 'attach_status': attach_status}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api._validate_bdm,
+ self.context, self.instance,
+ instance_type, all_mappings)
+
+ # Now we test a 404 case that results in InvalidBDMVolume.
+ def fake_volume_get_not_found(self, context, volume_id):
+ raise exception.VolumeNotFound(volume_id)
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
+ self.assertRaises(exception.InvalidBDMVolume,
+ self.compute_api._validate_bdm,
+ self.context, self.instance,
+ instance_type, all_mappings)
+
+ # Check that the volume status is 'available' and attach_status is
+ # 'detached' and accept the request if so
+ def fake_volume_get_ok(self, context, volume_id):
+ return {'id': volume_id,
+ 'status': 'available',
+ 'attach_status': 'detached'}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
+
+ self.compute_api._validate_bdm(self.context, self.instance,
+ instance_type, all_mappings)
+
+ def test_volume_snapshot_create(self):
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.volume_snapshot_create, self.context,
+ self.instance_object, 'fake_id', {})
+
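+ # The manager wraps driver errors in messaging.ExpectedException for
+ # RPC; utils.ExceptionHelper unwraps them so the underlying
+ # NotImplementedError can be asserted directly.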
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.volume_snapshot_create, self.context,
+ self.instance_object, 'fake_id', {})
+
+ def test_volume_snapshot_delete(self):
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.volume_snapshot_delete, self.context,
+ self.instance_object, 'fake_id', 'fake_id2', {})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.volume_snapshot_delete, self.context,
+ self.instance_object, 'fake_id', 'fake_id2', {})
+
+ @mock.patch.object(cinder.API, 'create',
+ side_effect=exception.OverQuota(overs='volumes'))
+ def test_prep_block_device_over_quota_failure(self, mock_create):
+ instance = self._create_fake_instance()
+ bdms = [
+ block_device.BlockDeviceDict({
+ 'boot_index': 0,
+ 'guest_format': None,
+ 'connection_info': None,
+ 'device_type': u'disk',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'image_id': 1,
+ 'device_name': '/dev/vdb',
+ })]
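+ # The Cinder OverQuota raised while creating the boot volume should
+ # surface from _prep_block_device as InvalidBDM.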
+ self.assertRaises(exception.InvalidBDM,
+ compute_manager.ComputeManager()._prep_block_device,
+ self.context, instance, bdms)
+ self.assertTrue(mock_create.called)
+
+ @mock.patch.object(nova.virt.block_device, 'get_swap')
+ @mock.patch.object(nova.virt.block_device, 'convert_blanks')
+ @mock.patch.object(nova.virt.block_device, 'convert_images')
+ @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
+ @mock.patch.object(nova.virt.block_device, 'convert_volumes')
+ @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_swap')
+ @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
+ def test_prep_block_device_with_blanks(self, attach_block_devices,
+ convert_swap, convert_ephemerals,
+ convert_volumes, convert_snapshots,
+ convert_images, convert_blanks,
+ get_swap):
+ instance = self._create_fake_instance()
+ instance['root_device_name'] = '/dev/vda'
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 1,
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 2}))
+ bdms = [blank_volume1, blank_volume2, root_volume]
+
+ def fake_attach_block_devices(bdm, *args, **kwargs):
+ return bdm
+
+ convert_swap.return_value = []
+ convert_ephemerals.return_value = []
+ convert_volumes.return_value = [blank_volume1, blank_volume2]
+ convert_snapshots.return_value = []
+ convert_images.return_value = [root_volume]
+ convert_blanks.return_value = []
+ attach_block_devices.side_effect = fake_attach_block_devices
+ get_swap.return_value = []
+
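+ # attach_block_devices is faked to return its input, so the resulting
+ # block_device_mapping should contain the two blank volumes and the
+ # image-backed root volume unchanged.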
+ expected_block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': [],
+ 'ephemerals': [],
+ 'block_device_mapping': bdms
+ }
+
+ manager = compute_manager.ComputeManager()
+ manager.use_legacy_block_device_info = False
+ block_device_info = manager._prep_block_device(self.context, instance,
+ bdms)
+
+ convert_swap.assert_called_once_with(bdms)
+ convert_ephemerals.assert_called_once_with(bdms)
+ convert_volumes.assert_called_once_with(bdms)
+ convert_snapshots.assert_called_once_with(bdms)
+ convert_images.assert_called_once_with(bdms)
+ convert_blanks.assert_called_once_with(bdms)
+
+ self.assertEqual(expected_block_device_info, block_device_info)
+ self.assertEqual(4, attach_block_devices.call_count)
+ get_swap.assert_called_once_with([])
+
+
+class ComputeTestCase(BaseTestCase):
+ def test_wrap_instance_fault(self):
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise NotImplementedError()
+
+ self.assertRaises(NotImplementedError, failer,
+ self.compute, self.context, instance=inst)
+
+ self.assertTrue(called['fault_added'])
+
+ def test_wrap_instance_fault_instance_in_args(self):
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise NotImplementedError()
+
+ self.assertRaises(NotImplementedError, failer,
+ self.compute, self.context, inst)
+
+ self.assertTrue(called['fault_added'])
+
+ def test_wrap_instance_fault_no_instance(self):
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
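+ # When the wrapped call fails because the instance itself is gone,
+ # wrap_instance_fault should not try to record a fault for it.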
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.assertRaises(exception.InstanceNotFound, failer,
+ self.compute, self.context, inst)
+
+ self.assertFalse(called['fault_added'])
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event(self, mock_finish, mock_start):
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self, context, instance):
+ pass
+
+ fake_event(self.compute, self.context, instance=inst)
+
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event_return(self, mock_finish, mock_start):
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self, context, instance):
+ return True
+
+ retval = fake_event(self.compute, self.context, instance=inst)
+
+ self.assertTrue(retval)
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self2, context, instance):
+ raise exception.NovaException()
+
+ self.assertRaises(exception.NovaException, fake_event,
+ self.compute, self.context, instance=inst)
+
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+ args, kwargs = mock_finish.call_args
+ self.assertIsInstance(kwargs['exc_val'], exception.NovaException)
+
+ def test_object_compat(self):
+ db_inst = fake_instance.fake_db_instance()
+
+ @compute_manager.object_compat
+ def test_fn(_self, context, instance):
+ self.assertIsInstance(instance, objects.Instance)
+ self.assertEqual(instance.uuid, db_inst['uuid'])
+ test_fn(None, self.context, instance=db_inst)
+
+ def test_object_compat_more_positional_args(self):
+ db_inst = fake_instance.fake_db_instance()
+
+ @compute_manager.object_compat
+ def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
+ self.assertIsInstance(instance, objects.Instance)
+ self.assertEqual(instance.uuid, db_inst['uuid'])
+ self.assertEqual(pos_arg_1, 'fake_pos_arg1')
+ self.assertEqual(pos_arg_2, 'fake_pos_arg2')
+
+ test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
+
+ def test_create_instance_with_img_ref_associates_config_drive(self):
+ # Make sure create associates a config drive.
+
+ instance = self._create_fake_instance_obj(
+ params={'config_drive': '1234', })
+
+ try:
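+ # The positional arguments here are request_spec, filter_properties,
+ # requested_networks, injected_files, admin_password, is_first_time,
+ # node and legacy_bdm_in_spec (compare the keyword form used in
+ # test_run_instance_setup_block_device_mapping_fail below).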
+ self.compute.run_instance(self.context, instance, {}, {},
+ [], None, None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertTrue(instance['config_drive'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_create_instance_associates_config_drive(self):
+ # Make sure create associates a config drive.
+
+ instance = self._create_fake_instance_obj(
+ params={'config_drive': '1234', })
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {},
+ [], None, None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertTrue(instance['config_drive'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_create_instance_unlimited_memory(self):
+ # Default of memory limit=None is unlimited.
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ params = {"memory_mb": 999999999999}
+ filter_properties = {'limits': {'memory_mb': None}}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+ self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
+
+ def test_create_instance_unlimited_disk(self):
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ params = {"root_gb": 999999999999,
+ "ephemeral_gb": 99999999999}
+ filter_properties = {'limits': {'disk_gb': None}}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_multiple_instances_then_starve(self):
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
+ params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
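+ # local_gb_used accounts for both root and ephemeral disk
+ # (128 GB + 128 GB = 256 GB).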
+ self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
+ self.assertEqual(256, self.rt.compute_node['local_gb_used'])
+
+ params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+ self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
+ self.assertEqual(768, self.rt.compute_node['local_gb_used'])
+
+ params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
+ instance = self._create_fake_instance_obj(params)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance,
+ {}, filter_properties, [], None, None, True, None, False)
+
+ def test_create_multiple_instance_with_neutron_port(self):
+ instance_type = flavors.get_default_flavor()
+
+ def fake_is_neutron():
+ return True
+ self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='adadds')])
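+ # A specific port can only be attached to a single instance, so
+ # requesting max_count=2 with a port id must fail.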
+ self.assertRaises(exception.MultiplePortsNotApplicable,
+ self.compute_api.create,
+ self.context,
+ instance_type=instance_type,
+ image_href=None,
+ max_count=2,
+ requested_networks=requested_networks)
+
+ def test_create_instance_with_oversubscribed_ram(self):
+ # Test passing of oversubscribed ram policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_mem_mb = resources['memory_mb']
+
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.45)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # total_mem_mb, but is less than the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'memory_mb': oversub_limit_mb}
+ filter_properties = {'limits': limits}
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
+
+ def test_create_instance_with_oversubscribed_ram_fail(self):
+ """Test passing of oversubscribed ram policy from the scheduler, but
+ with insufficient memory.
+ """
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_mem_mb = resources['memory_mb']
+
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.55)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # both total_mem_mb and the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance(params)
+
+ filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
+
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_with_oversubscribed_cpu(self):
+ # Test passing of oversubscribed cpu policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ limits = {'vcpu': 3}
+ filter_properties = {'limits': limits}
+
+ # get total vcpus as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ self.assertEqual(1, resources['vcpus'])
+
+ # build an instance, specifying a number of vcpus that exceeds the
+ # host's vcpu count, but is less than the oversubscribed vcpu limit:
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 2}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
+
+ # create one more instance:
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 1}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(3, self.rt.compute_node['vcpus_used'])
+
+ # delete the instance:
+ instance['vm_state'] = vm_states.DELETED
+ self.rt.update_usage(self.context,
+ instance=instance)
+
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
+
+ # now oversubscribe vcpus and fail:
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 2}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'vcpu': 3}
+ filter_properties = {'limits': limits}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_with_oversubscribed_disk(self):
+ # Test passing of oversubscribed disk policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total disk as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_disk_gb = resources['local_gb']
+
+ oversub_limit_gb = total_disk_gb * 1.5
+ instance_gb = int(total_disk_gb * 1.45)
+
+ # build an instance, specifying an amount of disk that exceeds
+ # total_disk_gb, but is less than the oversubscribed limit:
+ params = {"root_gb": instance_gb, "memory_mb": 10}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'disk_gb': oversub_limit_gb}
+ filter_properties = {'limits': limits}
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
+
+ def test_create_instance_with_oversubscribed_disk_fail(self):
+ """Test passing of oversubscribed disk policy from the scheduler, but
+ with insufficient disk.
+ """
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total disk as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_disk_gb = resources['local_gb']
+
+ oversub_limit_gb = total_disk_gb * 1.5
+ instance_gb = int(total_disk_gb * 1.55)
+
+ # build an instance, specifying an amount of disk that exceeds
+ # both total_disk_gb and the oversubscribed limit:
+ params = {"root_gb": instance_gb, "memory_mb": 10}
+ instance = self._create_fake_instance(params)
+
+ limits = {'disk_gb': oversub_limit_gb}
+ filter_properties = {'limits': limits}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_without_node_param(self):
+ instance = self._create_fake_instance_obj({'node': None})
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_create_instance_no_image(self):
+ # Create instance with no image provided.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
+ def test_default_access_ip(self):
+ self.flags(default_access_ip_network_name='test1')
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = self._create_fake_instance_obj()
+
+ orig_update = self.compute._instance_update
+
+ # Make sure the access_ip_* updates happen in the same DB
+ # update as the set to ACTIVE.
+ def _instance_update(ctxt, instance_uuid, **kwargs):
+ if kwargs.get('vm_state', None) == vm_states.ACTIVE:
+ self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
+ self.assertEqual(kwargs['access_ip_v6'],
+ '2001:db8:0:1:dcad:beff:feef:1')
+ return orig_update(ctxt, instance_uuid, **kwargs)
+
+ self.stubs.Set(self.compute, '_instance_update', _instance_update)
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
+ self.assertEqual(instance['access_ip_v6'],
+ '2001:db8:0:1:dcad:beff:feef:1')
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_no_default_access_ip(self):
+ instance = self._create_fake_instance_obj()
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertFalse(instance['access_ip_v4'])
+ self.assertFalse(instance['access_ip_v6'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_fail_to_schedule_persists(self):
+ # check the persistence of the ERROR(scheduling) state.
+ params = {'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING}
+ self._create_fake_instance(params=params)
+ # check state is failed even after the periodic poll
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING})
+
+ def test_run_instance_setup_block_device_mapping_fail(self):
+ """block device mapping failure test.
+
+ Make sure that when there is a block device mapping problem,
+ the instance goes to ERROR state, keeping the task state
+ """
+ def fake(*args, **kwargs):
+ raise exception.InvalidBDM()
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ '_prep_block_device', fake)
+ instance = self._create_fake_instance()
+ self.assertRaises(exception.InvalidBDM, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+
+ @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
+ side_effect=exception.OverQuota(overs='volumes'))
+ def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
+ """block device mapping over quota failure test.
+
+ Make sure that when we are over the volume quota, according to the
+ Cinder client, the appropriate exception is raised and the instance
+ goes to the ERROR state, keeping the task state.
+ """
+ instance = self._create_fake_instance()
+ self.assertRaises(exception.OverQuota, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.assertTrue(mock_prep_block_dev.called)
+
+ def test_run_instance_spawn_fail(self):
+ """spawn failure test.
+
+ Make sure that when there is a spawning problem,
+ the instance goes to ERROR state, keeping the task state.
+ """
+ def fake(*args, **kwargs):
+ raise test.TestingException()
+ self.stubs.Set(self.compute.driver, 'spawn', fake)
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(test.TestingException, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+
+ def test_run_instance_dealloc_network_instance_not_found(self):
+ """spawn network deallocate test.
+
+ Make sure that when an instance is not found during spawn,
+ the network is deallocated.
+ """
+ instance = self._create_fake_instance_obj()
+
+ def fake(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id="fake")
+
+ self.stubs.Set(self.compute.driver, 'spawn', fake)
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ def test_run_instance_bails_on_missing_instance(self):
+ # Make sure that run_instance() will quickly ignore a deleted instance
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_instance_update(self, *a, **args):
+ called['instance_update'] = True
+ raise exception.InstanceNotFound(instance_id='foo')
+ self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('instance_update', called)
+
+ def test_run_instance_bails_on_deleting_instance(self):
+ # Make sure that run_instance() will quickly ignore a deleting instance
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_instance_update(self, *a, **args):
+ called['instance_update'] = True
+ raise exception.UnexpectedDeletingTaskStateError(
+ expected='scheduling', actual='deleting')
+ self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('instance_update', called)
+
+ def test_run_instance_bails_on_missing_instance_2(self):
+ # Make sure that run_instance() will quickly ignore a deleted instance
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_default_block_device_names(self, *a, **args):
+ called['default_block_device_names'] = True
+ raise exception.InstanceNotFound(instance_id='foo')
+ self.stubs.Set(self.compute, '_default_block_device_names',
+ fake_default_block_device_names)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('default_block_device_names', called)
+
+ def test_can_terminate_on_error_state(self):
+ # Make sure that the instance can be terminated in ERROR state.
+ # check failed to schedule --> terminate
+ params = {'vm_state': vm_states.ERROR}
+ instance = self._create_fake_instance_obj(params=params)
+ self.compute.terminate_instance(self.context, instance, [], [])
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ self.context, instance['uuid'])
+ # Double check it's not there for admins, either.
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ self.context.elevated(), instance['uuid'])
+
+ def test_run_terminate(self):
+ # Make sure it is possible to run and terminate an instance.
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+
+ admin_deleted_context = context.get_admin_context(
+ read_deleted="only")
+ instance = db.instance_get_by_uuid(admin_deleted_context,
+ instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.DELETED)
+ self.assertIsNone(instance['task_state'])
+
+ def test_run_terminate_with_vol_attached(self):
+ """Make sure it is possible to run and terminate instance with volume
+ attached
+ """
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ def fake_check_attach(*args, **kwargs):
+ pass
+
+ def fake_reserve_volume(*args, **kwargs):
+ pass
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+
+ def fake_terminate_connection(self, context, volume_id, connector):
+ pass
+
+ def fake_detach(self, context, volume_id):
+ pass
+
+ bdms = []
+
+ def fake_rpc_reserve_block_device_name(self, context, instance, device,
+ volume_id, **kwargs):
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 1,
+ 'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc'})
+ bdm.create(context)
+ bdms.append(bdm)
+ return bdm
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
+ fake_reserve_volume)
+ self.stubs.Set(cinder.API, 'terminate_connection',
+ fake_terminate_connection)
+ self.stubs.Set(cinder.API, 'detach', fake_detach)
+ self.stubs.Set(compute_rpcapi.ComputeAPI,
+ 'reserve_block_device_name',
+ fake_rpc_reserve_block_device_name)
+
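+ # Attach a volume through the API so a BDM is created, then terminate
+ # the instance and verify that the BDM is removed.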
+ self.compute_api.attach_volume(self.context, instance, 1,
+ '/dev/vdc')
+
+ self.compute.terminate_instance(self.context,
+ instance, bdms, [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+ bdms = db.block_device_mapping_get_all_by_instance(self.context,
+ instance['uuid'])
+ self.assertEqual(len(bdms), 0)
+
+ def test_run_terminate_no_image(self):
+ """Make sure instance started without image (from volume)
+ can be termintad without issues
+ """
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+ instances = db.instance_get_all(self.context)
+ self.assertEqual(len(instances), 0)
+
+ def test_terminate_no_network(self):
+ # This is as reported in LP bug 1008875
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+ self.mox.ReplayAll()
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+
+ def test_run_terminate_timestamps(self):
+ # Make sure timestamps are set for launched and destroyed.
+ instance = self._create_fake_instance_obj()
+ instance['launched_at'] = None
+ self.assertIsNone(instance['launched_at'])
+ self.assertIsNone(instance['deleted_at'])
+ launch = timeutils.utcnow()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.refresh()
+ self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
+ self.assertIsNone(instance['deleted_at'])
+ terminate = timeutils.utcnow()
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ with utils.temporary_mutation(self.context, read_deleted='only'):
+ instance = db.instance_get_by_uuid(self.context,
+ instance['uuid'])
+ self.assertTrue(instance['launched_at'].replace(
+ tzinfo=None) < terminate)
+ self.assertTrue(instance['deleted_at'].replace(
+ tzinfo=None) > terminate)
+
+ def test_run_terminate_deallocate_net_failure_sets_error_state(self):
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ def _fake_deallocate_network(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_deallocate_network',
+ _fake_deallocate_network)
+
+ try:
+ self.compute.terminate_instance(self.context, instance, [], [])
+ except test.TestingException:
+ pass
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ERROR)
+
+ def test_stop(self):
+ # Ensure instance can be stopped.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ inst_uuid = instance['uuid']
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_start(self):
+ # Ensure instance can be started.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ extra = ['system_metadata', 'metadata']
+ inst_uuid = instance['uuid']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_stop_start_no_image(self):
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ extra = ['system_metadata', 'metadata']
+ inst_uuid = instance['uuid']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue(self):
+ # Ensure instance can be rescued and unrescued.
+
+ called = {'rescued': False,
+ 'unrescued': False}
+
+ def fake_rescue(self, context, instance_ref, network_info, image_meta,
+ rescue_password):
+ called['rescued'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+
+ def fake_unrescue(self, instance_ref, network_info):
+ called['unrescued'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ fake_unrescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.task_state = task_states.RESCUING
+ instance.save()
+ self.compute.rescue_instance(self.context, instance, None)
+ self.assertTrue(called['rescued'])
+ instance.task_state = task_states.UNRESCUING
+ instance.save()
+ self.compute.unrescue_instance(self.context, instance)
+ self.assertTrue(called['unrescued'])
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue_notifications(self):
+ # Ensure notifications on instance rescue.
+ def fake_rescue(self, context, instance_ref, network_info, image_meta,
+ rescue_password):
+ pass
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ fake_notifier.NOTIFICATIONS = []
+ instance.task_state = task_states.RESCUING
+ instance.save()
+ self.compute.rescue_instance(self.context, instance, None)
+
+ expected_notifications = ['compute.instance.rescue.start',
+ 'compute.instance.exists',
+ 'compute.instance.rescue.end']
+ self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
+ expected_notifications)
+ for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
+ self.assertEqual(msg.event_type, expected_notifications[n])
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertIn('rescue_image_name', msg.payload)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_unrescue_notifications(self):
+ # Ensure notifications on instance unrescue.
+ def fake_unrescue(self, instance_ref, network_info):
+ pass
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ fake_unrescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ fake_notifier.NOTIFICATIONS = []
+ instance.task_state = task_states.UNRESCUING
+ instance.save()
+ self.compute.unrescue_instance(self.context, instance)
+
+ expected_notifications = ['compute.instance.unrescue.start',
+ 'compute.instance.unrescue.end']
+ self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
+ expected_notifications)
+ for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
+ self.assertEqual(msg.event_type, expected_notifications[n])
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue_handle_err(self):
+ # If the driver fails to rescue, instance state should remain the same
+ # and the exception should be converted to InstanceNotRescuable
+ inst_obj = self._create_fake_instance_obj()
+ self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
+ self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
+
+ self.compute._get_rescue_image(
+ mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
+ nova.virt.fake.FakeDriver.rescue(
+ mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
+ ).AndRaise(RuntimeError("Try again later"))
+
+ self.mox.ReplayAll()
+
+ expected_message = ('Instance %s cannot be rescued: '
+ 'Driver Error: Try again later' % inst_obj.uuid)
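+ # Use an arbitrary vm_state so we can check that it is left untouched
+ # by the failed rescue.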
+ inst_obj.vm_state = 'some_random_state'
+
+ with testtools.ExpectedException(
+ exception.InstanceNotRescuable, expected_message):
+ self.compute.rescue_instance(
+ self.context, instance=inst_obj,
+ rescue_password='password')
+
+ self.assertEqual('some_random_state', inst_obj.vm_state)
+
+ @mock.patch.object(nova.compute.utils, "get_image_metadata")
+ @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
+ def test_rescue_with_image_specified(self, mock_rescue,
+ mock_get_image_metadata):
+
+ image_ref = "image-ref"
+ rescue_image_meta = {}
+ params = {"task_state": task_states.RESCUING}
+ instance = self._create_fake_instance_obj(params=params)
+
+ ctxt = context.get_admin_context()
+ mock_context = mock.Mock()
+ mock_context.elevated.return_value = ctxt
+
+ mock_get_image_metadata.return_value = rescue_image_meta
+
+ self.compute.rescue_instance(mock_context, instance=instance,
+ rescue_password="password", rescue_image_ref=image_ref)
+
+ mock_get_image_metadata.assert_called_with(ctxt,
+ self.compute.image_api,
+ image_ref, instance)
+ mock_rescue.assert_called_with(ctxt, instance, [],
+ rescue_image_meta, 'password')
+ self.compute.terminate_instance(ctxt, instance, [], [])
+
+ @mock.patch.object(nova.compute.utils, "get_image_metadata")
+ @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
+ def test_rescue_with_base_image_when_image_not_specified(self,
+ mock_rescue, mock_get_image_metadata):
+
+ image_ref = "image-ref"
+ system_meta = {"image_base_image_ref": image_ref}
+ rescue_image_meta = {}
+ params = {"task_state": task_states.RESCUING,
+ "system_metadata": system_meta}
+ instance = self._create_fake_instance_obj(params=params)
+
+ ctxt = context.get_admin_context()
+ mock_context = mock.Mock()
+ mock_context.elevated.return_value = ctxt
+
+ mock_get_image_metadata.return_value = rescue_image_meta
+
+ self.compute.rescue_instance(mock_context, instance=instance,
+ rescue_password="password")
+
+ mock_get_image_metadata.assert_called_with(ctxt,
+ self.compute.image_api,
+ image_ref, instance)
+ mock_rescue.assert_called_with(ctxt, instance, [],
+ rescue_image_meta, 'password')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_power_on(self):
+ # Ensure instance can be powered on.
+
+ called = {'power_on': False}
+
+ def fake_driver_power_on(self, context, instance, network_info,
+ block_device_info):
+ called['power_on'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
+ fake_driver_power_on)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ instance['uuid'],
+ expected_attrs=extra)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.assertTrue(called['power_on'])
+ self.compute.terminate_instance(self.context, inst_obj, [], [])
+
+ def test_power_off(self):
+ # Ensure instance can be powered off.
+
+ called = {'power_off': False}
+
+ def fake_driver_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
+ called['power_off'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
+ fake_driver_power_off)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ instance['uuid'],
+ expected_attrs=extra)
+ inst_obj.task_state = task_states.POWERING_OFF
+ inst_obj.save(self.context)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ self.assertTrue(called['power_off'])
+ self.compute.terminate_instance(self.context, inst_obj, [], [])
+
+ def test_pause(self):
+ # Ensure instance can be paused and unpaused.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None, None, True,
+ None, False)
+ instance.task_state = task_states.PAUSING
+ instance.save()
+ fake_notifier.NOTIFICATIONS = []
+ self.compute.pause_instance(self.context, instance=instance)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.pause.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.pause.end')
+ instance.task_state = task_states.UNPAUSING
+ instance.save()
+ fake_notifier.NOTIFICATIONS = []
+ self.compute.unpause_instance(self.context, instance=instance)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.unpause.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.unpause.end')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_suspend(self):
+ # ensure instance can be suspended and resumed.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.task_state = task_states.SUSPENDING
+ instance.save()
+ self.compute.suspend_instance(self.context, instance)
+ instance.task_state = task_states.RESUMING
+ instance.save()
+ self.compute.resume_instance(self.context, instance)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_suspend_error(self):
+ # Ensure vm_state is ERROR when suspend error occurs.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ with mock.patch.object(self.compute.driver, 'suspend',
+ side_effect=test.TestingException):
+ self.assertRaises(test.TestingException,
+ self.compute.suspend_instance,
+ self.context,
+ instance=instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance.uuid)
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ def test_suspend_not_implemented(self):
+ # Ensure the expected exception is raised and the instance's vm_state
+ # is restored to its original value if suspend is not implemented by
+ # the driver.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ with mock.patch.object(self.compute.driver, 'suspend',
+ side_effect=NotImplementedError('suspend test')):
+ self.assertRaises(NotImplementedError,
+ self.compute.suspend_instance,
+ self.context,
+ instance=instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance.uuid)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+ def test_suspend_rescued(self):
+ # ensure rescued instance can be suspended and resumed.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.vm_state = vm_states.RESCUED
+ instance.task_state = task_states.SUSPENDING
+ instance.save()
+
+ self.compute.suspend_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
+
+ instance.task_state = task_states.RESUMING
+ instance.save()
+ self.compute.resume_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.RESCUED)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resume_no_old_state(self):
+ # ensure a suspended instance with no old_vm_state is resumed to the
+ # ACTIVE state
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.vm_state = vm_states.SUSPENDED
+ instance.task_state = task_states.RESUMING
+ instance.save()
+
+ self.compute.resume_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild(self):
+ # Ensure instance can be rebuilt.
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_driver(self):
+ # Make sure virt drivers can override default rebuild
+ called = {'rebuild': False}
+
+ def fake(**kwargs):
+ instance = kwargs['instance']
+ instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ instance.task_state = task_states.REBUILD_SPAWNING
+ instance.save(
+ expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
+ called['rebuild'] = True
+
+ self.stubs.Set(self.compute.driver, 'rebuild', fake)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.assertTrue(called['rebuild'])
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_no_image(self):
+ # Ensure instance can be rebuilt when started with no image.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ '', '', injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata, bdms=[],
+ recreate=False, on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_launched_at_time(self):
+ # Ensure launched_at is updated when an instance is rebuilt.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ timeutils.set_time_override(cur_time)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata={},
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ instance.refresh()
+ self.assertEqual(cur_time,
+ instance['launched_at'].replace(tzinfo=None))
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_with_injected_files(self):
+ # Ensure instance can be rebuilt with injected files.
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ]
+
+ self.decoded_files = [
+ ('/a/b/c', 'foobarbaz'),
+ ]
+
+ def _spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info):
+ self.assertEqual(self.decoded_files, injected_files)
+
+ self.stubs.Set(self.compute.driver, 'spawn', _spawn)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=injected_files,
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_reboot(self, soft,
+ test_delete=False, test_unrescue=False,
+ fail_reboot=False, fail_running=False):
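+ # Helper that drives reboot_instance through mox expectations.
+ # test_delete simulates the instance being deleted before the final DB
+ # update, test_unrescue starts the instance in the RESCUED vm_state,
+ # fail_reboot makes the stubbed driver.reboot raise InstanceNotFound,
+ # and fail_running treats that failure as recoverable because the
+ # guest still reports a RUNNING power state.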
+
+ reboot_type = soft and 'SOFT' or 'HARD'
+ task_pending = (soft and task_states.REBOOT_PENDING
+ or task_states.REBOOT_PENDING_HARD)
+ task_started = (soft and task_states.REBOOT_STARTED
+ or task_states.REBOOT_STARTED_HARD)
+ expected_task = (soft and task_states.REBOOTING
+ or task_states.REBOOTING_HARD)
+ expected_tasks = (soft and (task_states.REBOOTING,
+ task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED)
+ or (task_states.REBOOTING_HARD,
+ task_states.REBOOT_PENDING_HARD,
+ task_states.REBOOT_STARTED_HARD))
+
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ db_instance = fake_instance.fake_db_instance(
+ **dict(uuid='fake-instance',
+ power_state=power_state.NOSTATE,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+ instance = objects.Instance._from_db_object(econtext,
+ objects.Instance(),
+ db_instance)
+
+ updated_dbinstance1 = fake_instance.fake_db_instance(
+ **dict(uuid='updated-instance1',
+ power_state=10003,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+ updated_dbinstance2 = fake_instance.fake_db_instance(
+ **dict(uuid='updated-instance2',
+ power_state=10003,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+
+ if test_unrescue:
+ instance.vm_state = vm_states.RESCUED
+ instance.obj_reset_changes()
+
+ fake_nw_model = network_model.NetworkInfo()
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 10001
+ fake_power_state2 = power_state.RUNNING
+ fake_power_state3 = 10002
+
+ # Beginning of calls we expect.
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_block_device_info(
+ econtext, instance).AndReturn(fake_block_dev_info)
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ db.instance_update_and_get_original(econtext, instance['uuid'],
+ {'task_state': task_pending,
+ 'expected_task_state': expected_tasks,
+ 'power_state': fake_power_state1},
+ update_cells=False,
+ columns_to_join=['system_metadata']
+ ).AndReturn((None,
+ updated_dbinstance1))
+ expected_nw_info = fake_nw_model
+ db.instance_update_and_get_original(econtext,
+ updated_dbinstance1['uuid'],
+ {'task_state': task_started,
+ 'expected_task_state': task_pending},
+ update_cells=False,
+ columns_to_join=['system_metadata']
+ ).AndReturn((None,
+ updated_dbinstance1))
+
+ # Annoying. driver.reboot is wrapped in a try/except, and
+ # doesn't re-raise. It eats the exception generated by mox if
+ # this is called with the wrong args, so we have to hack
+ # around it.
+ reboot_call_info = {}
+ expected_call_info = {
+ 'args': (econtext, instance, expected_nw_info,
+ reboot_type),
+ 'kwargs': {'block_device_info': fake_block_dev_info}}
+ fault = exception.InstanceNotFound(instance_id='instance-0000')
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ # NOTE(sirp): Since `bad_volumes_callback` is a function defined
+ # within `reboot_instance`, we don't have access to its value and
+ # can't stub it out, thus we skip that comparison.
+ kwargs.pop('bad_volumes_callback')
+ if fail_reboot:
+ raise fault
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ if not fail_reboot or fail_running:
+ new_power_state = fake_power_state2
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state2)
+ else:
+ new_power_state = fake_power_state3
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state3)
+
+ if test_delete:
+ fault = exception.InstanceNotFound(
+ instance_id=instance['uuid'])
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'power_state': new_power_state,
+ 'task_state': None,
+ 'vm_state': vm_states.ACTIVE},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndRaise(fault)
+ self.compute._notify_about_instance_usage(
+ econtext,
+ instance,
+ 'reboot.end')
+ elif fail_reboot and not fail_running:
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'vm_state': vm_states.ERROR},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndRaise(fault)
+ else:
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'power_state': new_power_state,
+ 'task_state': None,
+ 'vm_state': vm_states.ACTIVE},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndReturn((None, updated_dbinstance2))
+ if fail_running:
+ self.compute._notify_about_instance_usage(econtext, instance,
+ 'reboot.error', fault=fault)
+ self.compute._notify_about_instance_usage(
+ econtext,
+ instance,
+ 'reboot.end')
+
+ self.mox.ReplayAll()
+
+ if not fail_reboot or fail_running:
+ self.compute.reboot_instance(self.context, instance=instance,
+ block_device_info=None,
+ reboot_type=reboot_type)
+ else:
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute.reboot_instance,
+ self.context, instance=instance,
+ block_device_info=None,
+ reboot_type=reboot_type)
+
+ self.assertEqual(expected_call_info, reboot_call_info)
+
+ def test_reboot_soft(self):
+ self._test_reboot(True)
+
+ def test_reboot_soft_and_delete(self):
+ self._test_reboot(True, True)
+
+ def test_reboot_soft_and_rescued(self):
+ self._test_reboot(True, False, True)
+
+ def test_reboot_soft_and_delete_and_rescued(self):
+ self._test_reboot(True, True, True)
+
+ def test_reboot_hard(self):
+ self._test_reboot(False)
+
+ def test_reboot_hard_and_delete(self):
+ self._test_reboot(False, True)
+
+ def test_reboot_hard_and_rescued(self):
+ self._test_reboot(False, False, True)
+
+ def test_reboot_hard_and_delete_and_rescued(self):
+ self._test_reboot(False, True, True)
+
+ def test_reboot_fail(self):
+ self._test_reboot(False, fail_reboot=True)
+
+ def test_reboot_fail_running(self):
+ self._test_reboot(False, fail_reboot=True,
+ fail_running=True)
+
+ def test_get_instance_block_device_info_source_image(self):
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3,
+ 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
+ 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'connection_info': '{"driver_volume_type": "rbd"}',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0
+ })])
+
+ with (mock.patch.object(
+ objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid',
+ return_value=bdms)
+ ) as mock_get_by_instance:
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, self._create_fake_instance())
+ )
+ expected = {
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': [{
+ 'connection_info': {
+ 'driver_volume_type': 'rbd'
+ },
+ 'mount_device': '/dev/vda',
+ 'delete_on_termination': False
+ }]
+ }
+ self.assertTrue(mock_get_by_instance.called)
+ self.assertEqual(block_device_info, expected)
+
+ def test_get_instance_block_device_info_passed_bdms(self):
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3,
+ 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
+ 'device_name': '/dev/vdd',
+ 'connection_info': '{"driver_volume_type": "rbd"}',
+ 'source_type': 'volume',
+ 'destination_type': 'volume'})
+ ])
+ with (mock.patch.object(
+ objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')) as mock_get_by_instance:
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, self._create_fake_instance(), bdms=bdms)
+ )
+ expected = {
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': [{
+ 'connection_info': {
+ 'driver_volume_type': 'rbd'
+ },
+ 'mount_device': '/dev/vdd',
+ 'delete_on_termination': False
+ }]
+ }
+ self.assertFalse(mock_get_by_instance.called)
+ self.assertEqual(block_device_info, expected)
+
+ def test_get_instance_block_device_info_swap_and_ephemerals(self):
+ instance = self._create_fake_instance()
+
+ ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 1,
+ 'boot_index': -1
+ })
+ ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 2,
+ 'boot_index': -1
+ })
+ swap = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'volume_size': 1,
+ 'boot_index': -1
+ })
+
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [swap, ephemeral0, ephemeral1])
+
+ with (
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value=bdms)
+ ) as mock_get_by_instance_uuid:
+ expected_block_device_info = {
+ 'swap': {'device_name': '/dev/vdd', 'swap_size': 1},
+ 'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1,
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/vdc', 'num': 1, 'size': 2,
+ 'virtual_name': 'ephemeral1'}],
+ 'block_device_mapping': []
+ }
+
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, instance)
+ )
+
+ mock_get_by_instance_uuid.assert_called_once_with(self.context,
+ instance['uuid'])
+ self.assertEqual(expected_block_device_info, block_device_info)
+
+ def test_inject_network_info(self):
+ # Ensure we can inject network info.
+ called = {'inject': False}
+
+ def fake_driver_inject_network(self, instance, network_info):
+ called['inject'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
+ fake_driver_inject_network)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.compute.inject_network_info(self.context, instance=instance)
+ self.assertTrue(called['inject'])
+ self.compute.terminate_instance(self.context,
+ instance, [], [])
+
+ def test_reset_network(self):
+ # Ensure we can reset networking on an instance.
+ called = {'count': 0}
+
+ def fake_driver_reset_network(self, instance):
+ called['count'] += 1
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
+ fake_driver_reset_network)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.compute.reset_network(self.context, instance)
+
+ self.assertEqual(called['count'], 1)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _get_snapshotting_instance(self):
+ # Create a running instance and move it into the IMAGE_SNAPSHOT_PENDING task state.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
+ instance.save()
+ return instance
+
+ def test_snapshot(self):
+ inst_obj = self._get_snapshotting_instance()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_no_image(self):
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj.image_ref = ''
+ inst_obj.save()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def _test_snapshot_fails(self, raise_during_cleanup, method,
+ expected_state=True):
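+ # Helper for the snapshot/backup failure paths: the stubbed driver
+ # snapshot always raises, `method` selects snapshot_instance vs
+ # backup_instance, raise_during_cleanup makes the image-service delete
+ # stub raise as well, and expected_state is the expected value of
+ # fake_image_delete_called afterwards.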
+ def fake_snapshot(*args, **kwargs):
+ raise test.TestingException()
+
+ self.fake_image_delete_called = False
+
+ def fake_delete(self_, context, image_id):
+ self.fake_image_delete_called = True
+ if raise_during_cleanup:
+ raise Exception()
+
+ self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ inst_obj = self._get_snapshotting_instance()
+ if method == 'snapshot':
+ self.assertRaises(test.TestingException,
+ self.compute.snapshot_instance,
+ self.context, image_id='fakesnap',
+ instance=inst_obj)
+ else:
+ self.assertRaises(test.TestingException,
+ self.compute.backup_instance,
+ self.context, image_id='fakesnap',
+ instance=inst_obj, backup_type='fake',
+ rotation=1)
+
+ self.assertEqual(expected_state, self.fake_image_delete_called)
+ self._assert_state({'task_state': None})
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ def test_backup_fails(self, mock_rotate):
+ self._test_snapshot_fails(False, 'backup')
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
+ self._test_snapshot_fails(True, 'backup')
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_do_snapshot_instance')
+ def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate):
+ mock_rotate.side_effect = test.TestingException()
+ self._test_snapshot_fails(True, 'backup', False)
+
+ def test_snapshot_fails(self):
+ self._test_snapshot_fails(False, 'snapshot')
+
+ def test_snapshot_fails_cleanup_ignores_exception(self):
+ self._test_snapshot_fails(True, 'snapshot')
+
+ def _test_snapshot_deletes_image_on_failure(self, status, exc):
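+ # Helper: the stubbed driver snapshot raises `exc` while the fake image
+ # service reports the snapshot image with the given `status`; callers
+ # then check fake_image_delete_called to see whether the manager
+ # cleaned up the partially-created image.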
+ self.fake_image_delete_called = False
+
+ def fake_show(self_, context, image_id, **kwargs):
+ self.assertEqual('fakesnap', image_id)
+ image = {'id': image_id,
+ 'status': status}
+ return image
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ def fake_delete(self_, context, image_id):
+ self.fake_image_delete_called = True
+ self.assertEqual('fakesnap', image_id)
+
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ def fake_snapshot(*args, **kwargs):
+ raise exc
+
+ self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+
+ fake_image.stub_out_image_service(self.stubs)
+
+ inst_obj = self._get_snapshotting_instance()
+
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_fails_with_glance_error(self):
+ image_not_found = exception.ImageNotFound(image_id='fakesnap')
+ self._test_snapshot_deletes_image_on_failure('error', image_not_found)
+ self.assertFalse(self.fake_image_delete_called)
+ self._assert_state({'task_state': None})
+
+ def test_snapshot_fails_with_task_state_error(self):
+ deleting_state_error = exception.UnexpectedDeletingTaskStateError(
+ expected=task_states.IMAGE_SNAPSHOT, actual=task_states.DELETING)
+ self._test_snapshot_deletes_image_on_failure(
+ 'error', deleting_state_error)
+ self.assertTrue(self.fake_image_delete_called)
+ self._test_snapshot_deletes_image_on_failure(
+ 'active', deleting_state_error)
+ self.assertFalse(self.fake_image_delete_called)
+
+ def test_snapshot_fails_with_instance_not_found(self):
+ instance_not_found = exception.InstanceNotFound(instance_id='uuid')
+ self._test_snapshot_deletes_image_on_failure(
+ 'error', instance_not_found)
+ self.assertTrue(self.fake_image_delete_called)
+ self._test_snapshot_deletes_image_on_failure(
+ 'active', instance_not_found)
+ self.assertFalse(self.fake_image_delete_called)
+
+ def test_snapshot_handles_cases_when_instance_is_deleted(self):
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj.task_state = task_states.DELETING
+ inst_obj.save()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_handles_cases_when_instance_is_not_found(self):
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj2 = objects.Instance.get_by_uuid(self.context, inst_obj.uuid)
+ inst_obj2.destroy()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def _assert_state(self, state_dict):
+ """Assert state of VM is equal to state passed as parameter."""
+ instances = db.instance_get_all(self.context)
+ self.assertEqual(len(instances), 1)
+
+ if 'vm_state' in state_dict:
+ self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
+ if 'task_state' in state_dict:
+ self.assertEqual(state_dict['task_state'],
+ instances[0]['task_state'])
+ if 'power_state' in state_dict:
+ self.assertEqual(state_dict['power_state'],
+ instances[0]['power_state'])
+
+ def test_console_output(self):
+ # Make sure we can get console output from instance.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ output = self.compute.get_console_output(self.context,
+ instance=instance, tail_length=None)
+ self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_tail(self):
+ # Make sure we can get console output from instance.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ output = self.compute.get_console_output(self.context,
+ instance=instance, tail_length=2)
+ self.assertEqual(output, 'ANOTHER\nLAST LINE')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_not_implemented(self):
+ def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
+
+ self.stubs.Set(self.compute.driver, 'get_console_output',
+ fake_not_implemented)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_instance_not_found(self):
+ def fake_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake-instance')
+
+ self.stubs.Set(self.compute.driver, 'get_console_output',
+ fake_not_found)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_novnc_vnc_console(self):
+ # Make sure we can get a vnc console for an instance.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_vnc_console(self.context, 'novnc',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_validate_console_port_vnc(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleVNC(host="fake_host", port=5900)
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="novnc"))
+
+ def test_validate_console_port_spice(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
+
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="spice-html5"))
+
+ def test_validate_console_port_rdp(self):
+ self.flags(enabled=True, group='rdp')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleRDP(host="fake_host", port=5900)
+
+ self.stubs.Set(self.compute.driver, "get_rdp_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="rdp-html5"))
+
+ def test_validate_console_port_wrong_port(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertFalse(self.compute.validate_console_port(
+ context=self.context, instance=instance, port="wrongport",
+ console_type="spice-html5"))
+
+ def test_xvpvnc_vnc_console(self):
+ # Make sure we can get a vnc console for an instance.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ console = self.compute.get_vnc_console(self.context, 'xvpvnc',
+ instance=instance)
+ self.assertTrue(console)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_vnc_console_type(self):
+ # Raise a useful error if the console type is an unrecognised string.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_vnc_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_vnc_console_type(self):
+ # Raise a useful error if the console type is None.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_vnc_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_get_vnc_console_not_implemented(self):
+ self.stubs.Set(self.compute.driver, 'get_vnc_console',
+ fake_not_implemented)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, 'novnc', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.get_vnc_console,
+ self.context, 'novnc', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_spicehtml5_spice_console(self):
+ # Make sure we can get a spice console for an instance.
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_spice_console(self.context, 'spice-html5',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_spice_console_type(self):
+ # Raise a useful error if the console type is an unrecognised string
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_spice_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_spice_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_spice_console_type(self):
+ # Raise a useful error if the console type is None
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_spice_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_spice_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rdphtml5_rdp_console(self):
+ # Make sure we can get an rdp console for an instance.
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_rdp_console(self.context, 'rdp-html5',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_rdp_console_type(self):
+ # Raise a useful error if the console type is an unrecognised string
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_rdp_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_rdp_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_rdp_console_type(self):
+ # Raise a useful error if the console type is None
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_rdp_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_rdp_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_vnc_console_instance_not_ready(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_vnc_console, self.context, 'novnc',
+ instance=instance)
+
+ def test_spice_console_instance_not_ready(self):
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_spice_console, self.context, 'spice-html5',
+ instance=instance)
+
+ def test_rdp_console_instance_not_ready(self):
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_rdp_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_rdp_console, self.context, 'rdp-html5',
+ instance=instance)
+
+ def test_vnc_console_disabled(self):
+ self.flags(vnc_enabled=False)
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_vnc_console, self.context, 'novnc',
+ instance=instance)
+
+ def test_spice_console_disabled(self):
+ self.flags(enabled=False, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_spice_console, self.context, 'spice-html5',
+ instance=instance)
+
+ def test_rdp_console_disabled(self):
+ self.flags(enabled=False, group='rdp')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_rdp_console, self.context, 'rdp-html5',
+ instance=instance)
+
+ def test_diagnostics(self):
+ # Make sure we can get diagnostics for an instance.
+ expected_diagnostic = {'cpu0_time': 17300000000,
+ 'memory': 524288,
+ 'vda_errors': -1,
+ 'vda_read': 262144,
+ 'vda_read_req': 112,
+ 'vda_write': 5778432,
+ 'vda_write_req': 488,
+ 'vnet1_rx': 2070139,
+ 'vnet1_rx_drop': 0,
+ 'vnet1_rx_errors': 0,
+ 'vnet1_rx_packets': 26701,
+ 'vnet1_tx': 140208,
+ 'vnet1_tx_drop': 0,
+ 'vnet1_tx_errors': 0,
+ 'vnet1_tx_packets': 662,
+ }
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ diagnostics = self.compute.get_diagnostics(self.context,
+ instance=instance)
+ self.assertEqual(diagnostics, expected_diagnostic)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_instance_diagnostics(self):
+ # Make sure we can get diagnostics for an instance.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ diagnostics = self.compute.get_instance_diagnostics(self.context,
+ instance=instance)
+ expected = {'config_drive': True,
+ 'cpu_details': [{'time': 17300000000}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': 'fake-disk-id',
+ 'read_bytes': 262144,
+ 'read_requests': 112,
+ 'write_bytes': 5778432,
+ 'write_requests': 488}],
+ 'driver': 'fake',
+ 'hypervisor_os': 'fake-os',
+ 'memory_details': {'maximum': 524288, 'used': 0},
+ 'nic_details': [{'mac_address': '01:23:45:67:89:ab',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 2070139,
+ 'rx_packets': 26701,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 140208,
+ 'tx_packets': 662}],
+ 'state': 'running',
+ 'uptime': 46664,
+ 'version': '1.0'}
+ self.assertEqual(expected, diagnostics)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_add_fixed_ip_usage_notification(self):
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
+ dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'inject_network_info', dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'reset_network', dummy)
+
+ instance = self._create_fake_instance_obj()
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+ self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
+ instance=instance)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_remove_fixed_ip_usage_notification(self):
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
+ dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'inject_network_info', dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'reset_network', dummy)
+
+ instance = self._create_fake_instance_obj()
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+ self.compute.remove_fixed_ip_from_instance(self.context, 1,
+ instance=instance)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_run_instance_usage_notification(self, request_spec=None):
+ # Ensure run_instance generates the appropriate usage notifications.
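+ # Also called directly by the image/volume-metadata variants below,
+ # which pass in a request_spec.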
+ request_spec = request_spec or {}
+ instance = self._create_fake_instance_obj()
+ expected_image_name = request_spec.get('image', {}).get('name', '')
+ self.compute.run_instance(self.context, instance, request_spec,
+ {}, [], None, None, True, None, False)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ instance.refresh()
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ # The last event is the one with the sugar in it.
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.create.end')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(expected_image_name, payload['image_name'])
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertEqual(payload['state'], 'active')
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertIn('fixed_ips', payload)
+ self.assertTrue(payload['launched_at'])
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.assertEqual('Success', payload['message'])
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_run_instance_image_usage_notification(self):
+ request_spec = {'image': {'name': 'fake_name', 'key': 'value'}}
+ self.test_run_instance_usage_notification(request_spec=request_spec)
+
+ def test_run_instance_usage_notification_volume_meta(self):
+ # Volume's image metadata won't contain the image name
+ request_spec = {'image': {'key': 'value'}}
+ self.test_run_instance_usage_notification(request_spec=request_spec)
+
+ def test_run_instance_end_notification_on_abort(self):
+ # Test that an end notification is sent if the build is aborted.
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+
+ def build_inst_abort(*args, **kwargs):
+ raise exception.BuildAbortException(reason="already deleted",
+ instance_uuid=instance_uuid)
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_abort)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ self.assertEqual(msg.event_type, 'compute.instance.create.end')
+ self.assertEqual('INFO', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("already deleted"))
+
+ def test_run_instance_error_notification_on_reschedule(self):
+ # Test that an error notification is sent if the build is rescheduled.
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+
+ def build_inst_fail(*args, **kwargs):
+ raise exception.RescheduledException(instance_uuid=instance_uuid,
+ reason="something bad happened")
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ self.assertEqual(msg.event_type, 'compute.instance.create.error')
+ self.assertEqual('ERROR', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("something bad happened"))
+
+ def test_run_instance_error_notification_on_failure(self):
+ # Test that an error notification is sent if the build fails hard.
+ instance = self._create_fake_instance_obj()
+
+ def build_inst_fail(*args, **kwargs):
+ raise test.TestingException("i'm dying")
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
+
+ self.assertRaises(test.TestingException, self.compute.run_instance,
+ self.context, instance, {}, {}, [], None, None, True, None,
+ False)
+
+ self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ self.assertEqual(msg.event_type, 'compute.instance.create.error')
+ self.assertEqual('ERROR', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("i'm dying"))
+
+ def test_terminate_usage_notification(self):
+ # Ensure terminate_instance generates correct usage notification.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+
+ timeutils.set_time_override(old_time)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ fake_notifier.NOTIFICATIONS = []
+ timeutils.set_time_override(cur_time)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
+
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.delete.start')
+ msg1 = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
+ msg1 = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
+ msg1 = fake_notifier.NOTIFICATIONS[3]
+ self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
+ payload = msg1.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertIn('terminated_at', payload)
+ self.assertIn('deleted_at', payload)
+ self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
+ self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ def test_run_instance_existing(self):
+ # Ensure failure when running an instance that already exists.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertRaises(exception.InstanceExists,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, [], None, None, True,
+ None, False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = self._create_fake_instance_obj()
+
+ macs = set(['01:23:45:67:89:ab'])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs,
+ security_groups=[], dhcp_options=None).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))
+
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(
+ mox.IsA(instance_obj.Instance)).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ def _create_server_group(self):
+ group_instance = self._create_fake_instance_obj(
+ params=dict(host=self.compute.host))
+
+ instance_group = objects.InstanceGroup(self.context)
+ instance_group.user_id = self.user_id
+ instance_group.project_id = self.project_id
+ instance_group.name = 'messi'
+ instance_group.uuid = str(uuid.uuid4())
+ instance_group.members = [group_instance.uuid]
+ instance_group.policies = ['anti-affinity']
+ fake_notifier.NOTIFICATIONS = []
+ instance_group.create()
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(instance_group.name, msg.payload['name'])
+ self.assertEqual(instance_group.members, msg.payload['members'])
+ self.assertEqual(instance_group.policies, msg.payload['policies'])
+ self.assertEqual(instance_group.project_id, msg.payload['project_id'])
+ self.assertEqual(instance_group.uuid, msg.payload['uuid'])
+ self.assertEqual('servergroup.create', msg.event_type)
+ return instance_group
+
+ def _run_instance_reschedules_on_anti_affinity_violation(self, group,
+ hint):
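+ # The group created by _create_server_group has an anti-affinity policy
+ # and already contains an instance on this host, so _build_instance
+ # must raise RescheduledException.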
+ instance = self._create_fake_instance_obj()
+ filter_properties = {'scheduler_hints': {'group': hint}}
+ self.assertRaises(exception.RescheduledException,
+ self.compute._build_instance,
+ self.context, {}, filter_properties,
+ [], None, None, True, None, instance,
+ None, False)
+
+ def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self):
+ group = self._create_server_group()
+ self._run_instance_reschedules_on_anti_affinity_violation(group,
+ group.name)
+
+ def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self):
+ group = self._create_server_group()
+ self._run_instance_reschedules_on_anti_affinity_violation(group,
+ group.uuid)
+
+ def test_instance_set_to_error_on_uncaught_exception(self):
+ # Test that the instance is set to the ERROR state when an exception is raised.
+ instance = self._create_fake_instance_obj()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "deallocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=None,
+ security_groups=[], dhcp_options=None
+ ).AndRaise(messaging.RemoteError())
+ self.compute.network_api.deallocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None).MultipleTimes()
+
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(messaging.RemoteError,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, None, None, None,
+ True, None, False)
+
+ instance.refresh()
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_delete_instance_keeps_net_on_power_off_fail(self):
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ exp = exception.InstancePowerOffFailure(reason='')
+ self.compute.driver.destroy(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(exp)
+ # mox will detect if _deallocate_network gets called unexpectedly
+ self.mox.ReplayAll()
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(exception.InstancePowerOffFailure,
+ self.compute._delete_instance,
+ self.context,
+ instance,
+ [],
+ self.none_quotas)
+
+ def test_delete_instance_loses_net_on_other_fail(self):
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ exp = test.TestingException()
+ self.compute.driver.destroy(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(exp)
+ self.compute._deallocate_network(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(test.TestingException,
+ self.compute._delete_instance,
+ self.context,
+ instance,
+ [],
+ self.none_quotas)
+
+ def test_delete_instance_deletes_console_auth_tokens(self):
+ instance = self._create_fake_instance_obj()
+ self.flags(vnc_enabled=True)
+
+ self.tokens_deleted = False
+
+ def fake_delete_tokens(*args, **kwargs):
+ self.tokens_deleted = True
+
+ cauth_rpcapi = self.compute.consoleauth_rpcapi
+ self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
+ fake_delete_tokens)
+
+ self.compute._delete_instance(self.context, instance, [],
+ self.none_quotas)
+
+ self.assertTrue(self.tokens_deleted)
+
+ def test_delete_instance_deletes_console_auth_tokens_cells(self):
+ instance = self._create_fake_instance_obj()
+ self.flags(vnc_enabled=True)
+ self.flags(enable=True, group='cells')
+
+ self.tokens_deleted = False
+
+ def fake_delete_tokens(*args, **kwargs):
+ self.tokens_deleted = True
+
+ cells_rpcapi = self.compute.cells_rpcapi
+ self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
+ fake_delete_tokens)
+
+ self.compute._delete_instance(self.context, instance,
+ [], self.none_quotas)
+
+ self.assertTrue(self.tokens_deleted)
+
+ def test_instance_termination_exception_sets_error(self):
+ """Test that we handle InstanceTerminationFailure
+ which is propagated up from the underlying driver.
+ """
+ instance = self._create_fake_instance_obj()
+
+ def fake_delete_instance(context, instance, bdms,
+ reservations=None):
+ raise exception.InstanceTerminationFailure(reason='')
+
+ self.stubs.Set(self.compute, '_delete_instance',
+ fake_delete_instance)
+
+ self.assertRaises(exception.InstanceTerminationFailure,
+ self.compute.terminate_instance,
+ self.context,
+ instance, [], [])
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ERROR)
+
+ def test_network_is_deallocated_on_spawn_failure(self):
+ # When a spawn fails, the network must be deallocated.
+ instance = self._create_fake_instance_obj()
+
+ self.mox.StubOutWithMock(self.compute, "_prep_block_device")
+ self.compute._prep_block_device(
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(messaging.RemoteError,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, None, None, None,
+ True, None, False)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ non_admin_context = context.RequestContext(None,
+ None,
+ is_admin=False)
+
+ def check_task_state(task_state):
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_state)
+
+ instance.refresh()
+
+ # should fail with locked nonadmin context
+ self.compute_api.lock(self.context, instance)
+ self.assertRaises(exception.InstanceIsLocked,
+ self.compute_api.reboot,
+ non_admin_context, instance, 'SOFT')
+ check_task_state(None)
+
+ # should fail with invalid task state
+ self.compute_api.unlock(self.context, instance)
+ instance.task_state = task_states.REBOOTING
+ instance.save()
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ non_admin_context, instance, 'SOFT')
+ check_task_state(task_states.REBOOTING)
+
+ # should succeed with admin context
+ instance.task_state = None
+ instance.save()
+ self.compute_api.reboot(self.context, instance, 'SOFT')
+ check_task_state(task_states.REBOOTING)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _check_locked_by(self, instance_uuid, locked_by):
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['locked'], locked_by is not None)
+ self.assertEqual(instance['locked_by'], locked_by)
+ return instance
+
+ def test_override_owner_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ admin_context = context.RequestContext('admin-user',
+ 'admin-project',
+ is_admin=True)
+
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ # Ensure that an admin can override the owner lock
+ self.compute_api.lock(self.context, instance)
+ self._check_locked_by(instance_uuid, 'owner')
+ self.compute_api.unlock(admin_context, instance)
+ self._check_locked_by(instance_uuid, None)
+
+ def test_upgrade_owner_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ admin_context = context.RequestContext('admin-user',
+ 'admin-project',
+ is_admin=True)
+
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ # Ensure that an admin can upgrade the lock and that
+ # the owner can no longer unlock
+ self.compute_api.lock(self.context, instance)
+ self.compute_api.lock(admin_context, instance)
+ self._check_locked_by(instance_uuid, 'admin')
+ instance.refresh()
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.unlock,
+ self.context, instance)
+ self._check_locked_by(instance_uuid, 'admin')
+ self.compute_api.unlock(admin_context, instance)
+ self._check_locked_by(instance_uuid, None)
+
+ def _test_state_revert(self, instance, operation, pre_task_state,
+ kwargs=None, vm_state=None):
+ if kwargs is None:
+ kwargs = {}
+
+ # The API would have set task_state, so do that here to test
+ # that the state gets reverted on failure
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": pre_task_state})
+
+ orig_elevated = self.context.elevated
+ orig_notify = self.compute._notify_about_instance_usage
+
+ def _get_an_exception(*args, **kwargs):
+ raise test.TestingException()
+
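+ # elevated() and _notify_about_instance_usage() are called early in the
+ # operations exercised below, so stubbing them to raise makes each
+ # manager method fail before completing; the check afterwards verifies
+ # that task_state was reverted to None (and, where given, that vm_state
+ # matches the expected value).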
+ self.stubs.Set(self.context, 'elevated', _get_an_exception)
+ self.stubs.Set(self.compute,
+ '_notify_about_instance_usage', _get_an_exception)
+
+ func = getattr(self.compute, operation)
+
+ self.assertRaises(test.TestingException,
+ func, self.context, instance=instance, **kwargs)
+ # self.context.elevated() is called in tearDown()
+ self.stubs.Set(self.context, 'elevated', orig_elevated)
+ self.stubs.Set(self.compute,
+ '_notify_about_instance_usage', orig_notify)
+
+ # Fetch the instance's task_state and make sure it reverted to None.
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ if vm_state:
+ self.assertEqual(instance.vm_state, vm_state)
+ self.assertIsNone(instance["task_state"])
+
+ def test_state_revert(self):
+ # ensure that task_state is reverted after a failed operation.
+ migration = objects.Migration()
+ migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+ migration.new_instance_type_id = '1'
+
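+ # Each entry is (manager method, the task_state the API would have set,
+ # optional kwargs for the call, optional expected vm_state afterwards).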
+ actions = [
+ ("reboot_instance", task_states.REBOOTING,
+ {'block_device_info': [],
+ 'reboot_type': 'SOFT'}),
+ ("stop_instance", task_states.POWERING_OFF),
+ ("start_instance", task_states.POWERING_ON),
+ ("terminate_instance", task_states.DELETING,
+ {'bdms': [],
+ 'reservations': []},
+ vm_states.ERROR),
+ ("soft_delete_instance", task_states.SOFT_DELETING,
+ {'reservations': []}),
+ ("restore_instance", task_states.RESTORING),
+ ("rebuild_instance", task_states.REBUILDING,
+ {'orig_image_ref': None,
+ 'image_ref': None,
+ 'injected_files': [],
+ 'new_pass': '',
+ 'orig_sys_metadata': {},
+ 'bdms': [],
+ 'recreate': False,
+ 'on_shared_storage': False}),
+ ("set_admin_password", task_states.UPDATING_PASSWORD,
+ {'new_pass': None}),
+ ("rescue_instance", task_states.RESCUING,
+ {'rescue_password': None}),
+ ("unrescue_instance", task_states.UNRESCUING),
+ ("revert_resize", task_states.RESIZE_REVERTING,
+ {'migration': migration,
+ 'reservations': []}),
+ ("prep_resize", task_states.RESIZE_PREP,
+ {'image': {},
+ 'instance_type': {},
+ 'reservations': [],
+ 'request_spec': {},
+ 'filter_properties': {},
+ 'node': None}),
+ ("resize_instance", task_states.RESIZE_PREP,
+ {'migration': migration,
+ 'image': {},
+ 'reservations': [],
+ 'instance_type': {}}),
+ ("pause_instance", task_states.PAUSING),
+ ("unpause_instance", task_states.UNPAUSING),
+ ("suspend_instance", task_states.SUSPENDING),
+ ("resume_instance", task_states.RESUMING),
+ ]
+
+ self._stub_out_resize_network_methods()
+ instance = self._create_fake_instance_obj()
+ for operation in actions:
+ self._test_state_revert(instance, *operation)
+
+ def _ensure_quota_reservations_committed(self, instance):
+ """Mock up commit of quota reservations."""
+ reservations = list('fake_res')
+ self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
+ project_id=instance['project_id'],
+ user_id=instance['user_id'])
+ self.mox.ReplayAll()
+ return reservations
+
+ def _ensure_quota_reservations_rolledback(self, instance):
+ """Mock up rollback of quota reservations."""
+ reservations = list('fake_res')
+ self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
+ nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
+ project_id=instance['project_id'],
+ user_id=instance['user_id'])
+ self.mox.ReplayAll()
+ return reservations
+
+ def test_quotas_successful_delete(self):
+ instance = self._create_fake_instance_obj()
+ resvs = self._ensure_quota_reservations_committed(instance)
+ self.compute.terminate_instance(self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def test_quotas_failed_delete(self):
+ instance = self._create_fake_instance_obj()
+
+ def fake_shutdown_instance(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_shutdown_instance',
+ fake_shutdown_instance)
+
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.assertRaises(test.TestingException,
+ self.compute.terminate_instance,
+ self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def test_quotas_successful_soft_delete(self):
+ instance = self._create_fake_instance_obj(
+ params=dict(task_state=task_states.SOFT_DELETING))
+ resvs = self._ensure_quota_reservations_committed(instance)
+ self.compute.soft_delete_instance(self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_failed_soft_delete(self):
+ instance = self._create_fake_instance_obj(
+ params=dict(task_state=task_states.SOFT_DELETING))
+
+ def fake_soft_delete(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute.driver, delete_types.SOFT_DELETE,
+ fake_soft_delete)
+
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.assertRaises(test.TestingException,
+ self.compute.soft_delete_instance,
+ self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_destroy_of_soft_deleted_instance(self):
+ instance = self._create_fake_instance_obj(
+ params=dict(vm_state=vm_states.SOFT_DELETED))
+ # Termination should succeed, but the quota reservations are
+ # rolled back because the instance was in SOFT_DELETED state.
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.compute.terminate_instance(self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def _stub_out_resize_network_methods(self):
+ def fake(cls, ctxt, instance, *args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
+ self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
+ self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)
+
+ def _test_finish_resize(self, power_on):
+ # Contrived test to ensure finish_resize doesn't raise anything; it
+ # also tests resizing from the ACTIVE or STOPPED state, which
+ # determines whether the resized instance is powered on.
+ vm_state = None
+ if power_on:
+ vm_state = vm_states.ACTIVE
+ else:
+ vm_state = vm_states.STOPPED
+ params = {'vm_state': vm_state}
+ instance = self._create_fake_instance_obj(params)
+ image = 'fake-image'
+ disk_info = 'fake-disk-info'
+ instance_type = flavors.get_default_flavor()
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=[], request_spec={},
+ filter_properties={}, node=None)
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+
+ # NOTE(mriedem): make sure prep_resize set old_vm_state correctly
+ sys_meta = instance.system_metadata
+ self.assertIn('old_vm_state', sys_meta)
+ if power_on:
+ self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
+ else:
+ self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ orig_mig_save = migration.save
+ orig_inst_save = instance.save
+ network_api = self.compute.network_api
+
+ self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
+ self.mox.StubOutWithMock(network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(migration, 'save')
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+
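+ # The side-effect helpers below assert on the instance/migration state
+ # at the moment each save() is expected to happen, then delegate to the
+ # original save methods captured above so the objects really persist.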
+ def _mig_save(context):
+ self.assertEqual(migration.status, 'finished')
+ self.assertEqual(vm_state, instance.vm_state)
+ self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
+ orig_mig_save()
+
+ def _instance_save1():
+ self.assertEqual(instance_type['id'],
+ instance.instance_type_id)
+ orig_inst_save()
+
+ def _instance_save2(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_MIGRATED,
+ expected_task_state)
+ self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
+ orig_inst_save(expected_task_state=expected_task_state)
+
+ def _instance_save3(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_FINISH,
+ expected_task_state)
+ self.assertEqual(vm_states.RESIZED, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertIn('launched_at', instance.obj_what_changed())
+ orig_inst_save(expected_task_state=expected_task_state)
+
+ # First save to update flavor
+ instance.save().WithSideEffects(_instance_save1)
+
+ network_api.setup_networks_on_host(self.context, instance,
+ 'fake-mini')
+ network_api.migrate_instance_finish(self.context,
+ mox.IsA(dict),
+ mox.IsA(dict))
+
+ self.compute._get_instance_nw_info(
+ self.context, instance).AndReturn('fake-nwinfo1')
+
+ # 2nd save to update task state
+ exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
+ instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
+
+ self.compute._notify_about_instance_usage(
+ self.context, instance, 'finish_resize.start',
+ network_info='fake-nwinfo1')
+
+ self.compute._get_instance_block_device_info(
+ self.context, instance,
+ refresh_conn_info=True).AndReturn('fake-bdminfo')
+ # nova.conf sets the default flavor to m1.small and the test
+ # sets it to m1.tiny, so they differ, which makes this a resize
+ self.compute.driver.finish_migration(self.context, migration,
+ instance, disk_info,
+ 'fake-nwinfo1',
+ image, True,
+ 'fake-bdminfo', power_on)
+ # Ensure the instance status update happens after the migration finishes
+ self.context.elevated().AndReturn(self.context)
+ migration.save(self.context).WithSideEffects(_mig_save)
+ exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
+ instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
+ self.compute._notify_about_instance_usage(
+ self.context, instance, 'finish_resize.end',
+ network_info='fake-nwinfo1')
+ # NOTE(comstud): This actually does the mox.ReplayAll()
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ self.compute.finish_resize(self.context,
+ migration=migration,
+ disk_info=disk_info, image=image, instance=instance,
+ reservations=reservations)
+
+ def test_finish_resize_from_active(self):
+ self._test_finish_resize(power_on=True)
+
+ def test_finish_resize_from_stopped(self):
+ self._test_finish_resize(power_on=False)
+
+ def test_finish_resize_with_volumes(self):
+ """Contrived test to ensure finish_resize doesn't raise anything."""
+
+ # create instance
+ instance = self._create_fake_instance_obj()
+
+ # create volume
+ volume_id = 'fake'
+ volume = {'instance_uuid': None,
+ 'device_name': None,
+ 'id': volume_id,
+ 'attach_status': 'detached'}
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': volume_id,
+ 'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc'})
+ bdm.create(self.context)
+
+ # stub out volume attach
+ def fake_volume_get(self, context, volume_id):
+ return volume
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ def fake_volume_check_attach(self, context, volume_id, instance):
+ pass
+ self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)
+
+ def fake_get_volume_encryption_metadata(self, context, volume_id):
+ return {}
+ self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
+ fake_get_volume_encryption_metadata)
+
+ orig_connection_data = {
+ 'target_discovered': True,
+ 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
+ 'target_portal': '127.0.0.1:3260',
+ 'volume_id': volume_id,
+ }
+ connection_info = {
+ 'driver_volume_type': 'iscsi',
+ 'data': orig_connection_data,
+ }
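+ # The cinder stubs defined below record their effects by mutating the
+ # shared volume/connection_info dicts in place, which is how the
+ # assertions later in the test observe attach, detach and connection
+ # changes.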
+
+ def fake_init_conn(self, context, volume_id, session):
+ return connection_info
+ self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
+
+ def fake_attach(self, context, volume_id, instance_uuid, device_name,
+ mode='rw'):
+ volume['instance_uuid'] = instance_uuid
+ volume['device_name'] = device_name
+ self.stubs.Set(cinder.API, "attach", fake_attach)
+
+ # stub out virt driver attach
+ def fake_get_volume_connector(*args, **kwargs):
+ return {}
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ fake_get_volume_connector)
+
+ def fake_attach_volume(*args, **kwargs):
+ pass
+ self.stubs.Set(self.compute.driver, 'attach_volume',
+ fake_attach_volume)
+
+ # attach volume to instance
+ self.compute.attach_volume(self.context, volume['id'],
+ '/dev/vdc', instance, bdm=bdm)
+
+ # assert volume attached correctly
+ self.assertEqual(volume['device_name'], '/dev/vdc')
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance.uuid)
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['device_name'], volume['device_name'])
+ self.assertEqual(bdm['connection_info'],
+ jsonutils.dumps(connection_info))
+
+ # begin resize
+ instance_type = flavors.get_default_flavor()
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=[], request_spec={},
+ filter_properties={}, node=None)
+
+ # fake out detach for prep_resize (and later terminate)
+ def fake_terminate_connection(self, context, volume, connector):
+ connection_info['data'] = None
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ self._stub_out_resize_network_methods()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration, image={}, reservations=[],
+ instance_type=jsonutils.to_primitive(instance_type))
+
+ # assert bdm is unchanged
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance.uuid)
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['device_name'], volume['device_name'])
+ cached_connection_info = jsonutils.loads(bdm['connection_info'])
+ self.assertEqual(cached_connection_info['data'],
+ orig_connection_data)
+ # but connection was terminated
+ self.assertIsNone(connection_info['data'])
+
+ # stub out virt driver finish_migration
+ def fake(*args, **kwargs):
+ pass
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ # new initialize connection
+ new_connection_data = dict(orig_connection_data)
+ new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
+ new_connection_data['target_iqn'] = new_iqn
+
+ def fake_init_conn_with_data(self, context, volume, session):
+ connection_info['data'] = new_connection_data
+ return connection_info
+ self.stubs.Set(cinder.API, "initialize_connection",
+ fake_init_conn_with_data)
+
+ self.compute.finish_resize(self.context,
+ migration=migration,
+ disk_info={}, image={}, instance=instance,
+ reservations=reservations)
+
+ # assert volume attached correctly
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid'])
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['connection_info'],
+ jsonutils.dumps(connection_info))
+
+ # stub out detach
+ def fake_detach(self, context, volume_uuid):
+ volume['device_path'] = None
+ volume['instance_uuid'] = None
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ # clean up
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_finish_resize_handles_error(self):
+ # Make sure we don't leave the instance in RESIZE on error.
+
+ def throw_up(*args, **kwargs):
+ raise test.TestingException()
+
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
+
+ self._stub_out_resize_network_methods()
+
+ old_flavor_name = 'm1.tiny'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_type = flavors.get_flavor_by_name('m1.small')
+
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=reservations,
+ request_spec={}, filter_properties={},
+ node=None)
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ instance.refresh()
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+ self.assertRaises(test.TestingException, self.compute.finish_resize,
+ self.context,
+ migration=migration,
+ disk_info={}, image={}, instance=instance,
+ reservations=reservations)
+ instance.refresh()
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ old_flavor = flavors.get_flavor_by_name(old_flavor_name)
+ self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(old_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(old_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(old_flavor['id'], instance.instance_type_id)
+ self.assertNotEqual(instance_type['id'], instance.instance_type_id)
+
+ def test_save_instance_info(self):
+ old_flavor_name = 'm1.tiny'
+ new_flavor_name = 'm1.small'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
+ new_flavor = flavors.get_flavor_by_name(new_flavor_name)
+
+ self.compute._save_instance_info(instance, new_flavor,
+ instance.system_metadata)
+
+ self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(new_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(new_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(new_flavor['id'], instance.instance_type_id)
+
+ def test_rebuild_instance_notification(self):
+ # Ensure notifications on instance rebuild.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ inst_ref = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, inst_ref, {}, {}, None, None,
+ None, True, None, False)
+ timeutils.set_time_override(cur_time)
+
+ fake_notifier.NOTIFICATIONS = []
+ instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
+ orig_sys_metadata = db.instance_system_metadata_get(self.context,
+ inst_ref['uuid'])
+ image_ref = instance["image_ref"]
+ new_image_ref = image_ref + '-new_image_ref'
+ db.instance_update(self.context, inst_ref['uuid'],
+ {'image_ref': new_image_ref})
+
+ password = "new_password"
+
+ inst_ref.task_state = task_states.REBUILDING
+ inst_ref.save()
+ self.compute.rebuild_instance(self.context,
+ inst_ref,
+ image_ref, new_image_ref,
+ injected_files=[],
+ new_pass=password,
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+
+ inst_ref.refresh()
+
+ image_ref_url = glance.generate_image_url(image_ref)
+ new_image_ref_url = glance.generate_image_url(new_image_ref)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.exists')
+ self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.rebuild.start')
+ self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
+ self.assertEqual(msg.payload['image_name'], 'fake_name')
+ msg = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.rebuild.end')
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['image_name'], 'fake_name')
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], inst_ref['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
+ self.assertEqual(payload['image_ref_url'], new_image_ref_url)
+ self.compute.terminate_instance(self.context, inst_ref, [], [])
+
+ def test_finish_resize_instance_notification(self):
+ # Ensure notifications on instance migrate/resize.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ instance = self._create_fake_instance_obj()
+ new_type = flavors.get_flavor_by_name('m1.small')
+ new_type = jsonutils.to_primitive(new_type)
+ new_type_id = new_type['id']
+ flavor_id = new_type['flavorid']
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+
+ instance.host = 'foo'
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=new_type, image={}, reservations=[],
+ request_spec={}, filter_properties={}, node=None)
+
+ self._stub_out_resize_network_methods()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration, image={}, instance_type=new_type,
+ reservations=[])
+ timeutils.set_time_override(cur_time)
+ fake_notifier.NOTIFICATIONS = []
+
+ self.compute.finish_resize(self.context,
+ migration=migration, reservations=[],
+ disk_info={}, image={}, instance=instance)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.finish_resize.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.finish_resize.end')
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.small')
+ self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance_notification(self):
+ # Ensure notifications on instance migrate/resize.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+ timeutils.set_time_override(cur_time)
+ fake_notifier.NOTIFICATIONS = []
+
+ instance.host = 'foo'
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+
+ instance_type = flavors.get_default_flavor()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type, image={}, reservations=[],
+ request_spec={}, filter_properties={}, node=None)
+ db.migration_get_by_instance_and_status(self.context.elevated(),
+ instance.uuid,
+ 'pre-migrating')
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.exists')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.resize.prep.start')
+ msg = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.resize.prep.end')
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_prep_resize_instance_migration_error_on_same_host(self):
+ """Ensure prep_resize raise a migration error if destination is set on
+ the same source host and allow_resize_to_same_host is false
+ """
+ self.flags(host="foo", allow_resize_to_same_host=False)
+
+ instance = self._create_fake_instance_obj()
+
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = self.compute.host
+ instance.save()
+ instance_type = flavors.get_default_flavor()
+
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_prep_resize_instance_migration_error_on_none_host(self):
+ """Ensure prep_resize raises a migration error if destination host is
+ not defined
+ """
+ instance = self._create_fake_instance_obj()
+
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = None
+ instance.save()
+ instance_type = flavors.get_default_flavor()
+
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance_driver_error(self):
+ # Ensure instance status set to Error on resize error.
+
+ def throw_up(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
+ throw_up)
+
+ instance = self._create_fake_instance_obj()
+ instance_type = flavors.get_default_flavor()
+
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = 'foo'
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ # verify
+ self.assertRaises(test.TestingException, self.compute.resize_instance,
+ self.context, instance=instance,
+ migration=migration, image={},
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
+ # NOTE(comstud): error path doesn't use objects, so our object
+ # is not updated. Refresh and compare against the DB.
+ instance.refresh()
+ self.assertEqual(instance.vm_state, vm_states.ERROR)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance_driver_rollback(self):
+ # Ensure instance status set to Running after rollback.
+
+ def throw_up(*args, **kwargs):
+ raise exception.InstanceFaultRollback(test.TestingException())
+
+ self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
+ throw_up)
+
+ instance = self._create_fake_instance_obj()
+ instance_type = flavors.get_default_flavor()
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = 'foo'
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ self.assertRaises(test.TestingException, self.compute.resize_instance,
+ self.context, instance=instance,
+ migration=migration, image={},
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
+ # NOTE(comstud): error path doesn't use objects, so our object
+ # is not updated. Refresh and compare against the DB.
+ instance.refresh()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertIsNone(instance.task_state)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_resize_instance(self, clean_shutdown=True):
+ # Ensure instance can be migrated/resized.
+ instance = self._create_fake_instance_obj()
+ instance_type = flavors.get_default_flavor()
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = 'foo'
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type, image={}, reservations=[],
+ request_spec={}, filter_properties={}, node=None)
+
+ # verify 'old_vm_state' was set on system_metadata
+ instance.refresh()
+ sys_meta = instance.system_metadata
+ self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
+
+ self._stub_out_resize_network_methods()
+
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
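+ # Patch the BDM lookup, volume-connection teardown and power-off value
+ # helpers so their calls can be asserted after resize_instance runs.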
+ with contextlib.nested(
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value='fake_bdms'),
+ mock.patch.object(
+ self.compute, '_get_instance_block_device_info',
+ return_value='fake_bdinfo'),
+ mock.patch.object(self.compute, '_terminate_volume_connections'),
+ mock.patch.object(self.compute, '_get_power_off_values',
+ return_value=(1, 2))
+ ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
+ mock_terminate_vol_conn, mock_get_power_off_values):
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration, image={}, reservations=[],
+ instance_type=jsonutils.to_primitive(instance_type),
+ clean_shutdown=clean_shutdown)
+ mock_get_instance_vol_bdinfo.assert_called_once_with(
+ self.context, instance, bdms='fake_bdms')
+ mock_terminate_vol_conn.assert_called_once_with(self.context,
+ instance, 'fake_bdms')
+ mock_get_power_off_values.assert_called_once_with(self.context,
+ instance, clean_shutdown)
+ self.assertEqual(migration.dest_compute, instance.host)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance(self):
+ self._test_resize_instance()
+
+ def test_resize_instance_forced_shutdown(self):
+ self._test_resize_instance(clean_shutdown=False)
+
+ def _test_confirm_resize(self, power_on):
+ # Common test case method for confirm_resize
+ def fake(*args, **kwargs):
+ pass
+
+ def fake_confirm_migration_driver(*args, **kwargs):
+ # Confirm the instance uses the new type in finish_resize
+ inst = args[1]
+ sys_meta = inst['system_metadata']
+ self.assertEqual(sys_meta['instance_type_flavorid'], '3')
+
+ old_vm_state = None
+ p_state = None
+ if power_on:
+ old_vm_state = vm_states.ACTIVE
+ p_state = power_state.RUNNING
+ else:
+ old_vm_state = vm_states.STOPPED
+ p_state = power_state.SHUTDOWN
+ params = {'vm_state': old_vm_state, 'power_state': p_state}
+ instance = self._create_fake_instance_obj(params)
+
+ self.flags(allow_resize_to_same_host=True)
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+ self.stubs.Set(self.compute.driver, 'confirm_migration',
+ fake_confirm_migration_driver)
+
+ self._stub_out_resize_network_methods()
+
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+
+ # Confirm the instance size before the resize starts
+ instance.refresh()
+ instance_type_ref = db.flavor_get(self.context,
+ instance.instance_type_id)
+ self.assertEqual(instance_type_ref['flavorid'], '1')
+
+ instance.vm_state = old_vm_state
+ instance.power_state = p_state
+ instance.save()
+
+ new_instance_type_ref = db.flavor_get_by_flavor_id(
+ self.context, 3)
+ new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
+ self.compute.prep_resize(self.context,
+ instance=instance,
+ instance_type=new_instance_type_p,
+ image={}, reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
+ sys_meta = instance.system_metadata
+ self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration,
+ image={},
+ reservations=[],
+ instance_type=new_instance_type_p)
+ self.compute.finish_resize(self.context,
+ migration=migration, reservations=[],
+ disk_info={}, image={}, instance=instance)
+
+ # Prove that the instance size is now the new size
+ instance_type_ref = db.flavor_get(self.context,
+ instance.instance_type_id)
+ self.assertEqual(instance_type_ref['flavorid'], '3')
+
+ # Finally, confirm the resize and verify the new flavor is applied
+ instance.task_state = None
+ instance.save()
+ self.compute.confirm_resize(self.context, instance=instance,
+ reservations=reservations,
+ migration=migration)
+
+ instance.refresh()
+
+ instance_type_ref = db.flavor_get(self.context,
+ instance.instance_type_id)
+ self.assertEqual(instance_type_ref['flavorid'], '3')
+ self.assertEqual('fake-mini', migration.source_compute)
+ self.assertEqual(old_vm_state, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(p_state, instance.power_state)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_confirm_resize_from_active(self):
+ self._test_confirm_resize(power_on=True)
+
+ def test_confirm_resize_from_stopped(self):
+ self._test_confirm_resize(power_on=False)
+
+ def _test_finish_revert_resize(self, power_on,
+ remove_old_vm_state=False):
+ """Convenience method that does most of the work for the
+ test_finish_revert_resize tests.
+ :param power_on: True if testing resize from ACTIVE state, False if
+ testing resize from STOPPED state.
+ :param remove_old_vm_state: True if testing a case where the
+ 'old_vm_state' system_metadata is not present when the
+ finish_revert_resize method is called.
+ """
+ def fake(*args, **kwargs):
+ pass
+
+ def fake_finish_revert_migration_driver(*args, **kwargs):
+ # Confirm the instance uses the old type in finish_revert_resize
+ inst = args[1]
+ sys_meta = inst.system_metadata
+ self.assertEqual(sys_meta['instance_type_flavorid'], '1')
+
+ old_vm_state = None
+ if power_on:
+ old_vm_state = vm_states.ACTIVE
+ else:
+ old_vm_state = vm_states.STOPPED
+ params = {'vm_state': old_vm_state}
+ instance = self._create_fake_instance_obj(params)
+
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+ self.stubs.Set(self.compute.driver, 'finish_revert_migration',
+ fake_finish_revert_migration_driver)
+
+ self._stub_out_resize_network_methods()
+
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+
+ instance.refresh()
+ instance_type_ref = db.flavor_get(self.context,
+ instance.instance_type_id)
+ self.assertEqual(instance_type_ref['flavorid'], '1')
+
+ old_vm_state = instance['vm_state']
+
+ instance.host = 'foo'
+ instance.vm_state = old_vm_state
+ instance.save()
+
+ new_instance_type_ref = db.flavor_get_by_flavor_id(
+ self.context, 3)
+ new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
+ self.compute.prep_resize(self.context,
+ instance=instance,
+ instance_type=new_instance_type_p,
+ image={}, reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
+ sys_meta = instance.system_metadata
+ self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration,
+ image={},
+ reservations=[],
+ instance_type=new_instance_type_p)
+ self.compute.finish_resize(self.context,
+ migration=migration, reservations=[],
+ disk_info={}, image={}, instance=instance)
+
+ # Prove that the instance size is now the new size
+ instance_type_ref = db.flavor_get(self.context,
+ instance['instance_type_id'])
+ self.assertEqual(instance_type_ref['flavorid'], '3')
+
+ instance.task_state = task_states.RESIZE_REVERTING
+ instance.save()
+
+ self.compute.revert_resize(self.context,
+ migration=migration, instance=instance,
+ reservations=reservations)
+
+ instance.refresh()
+ if remove_old_vm_state:
+ # need to wipe out the old_vm_state from system_metadata
+ # before calling finish_revert_resize
+ sys_meta = instance.system_metadata
+ sys_meta.pop('old_vm_state')
+ # Have to reset for save() to work
+ instance.system_metadata = sys_meta
+ instance.save()
+
+ self.compute.finish_revert_resize(self.context,
+ migration=migration,
+ instance=instance, reservations=reservations)
+
+ self.assertIsNone(instance.task_state)
+
+ instance_type_ref = db.flavor_get(self.context,
+ instance['instance_type_id'])
+ self.assertEqual(instance_type_ref['flavorid'], '1')
+ self.assertEqual(instance.host, migration.source_compute)
+ if remove_old_vm_state:
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ else:
+ self.assertEqual(old_vm_state, instance.vm_state)
+
+ def test_finish_revert_resize_from_active(self):
+ self._test_finish_revert_resize(power_on=True)
+
+ def test_finish_revert_resize_from_stopped(self):
+ self._test_finish_revert_resize(power_on=False)
+
+ def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
+ # in this case we resize from STOPPED but end up with ACTIVE
+ # because the old_vm_state value is not present in
+ # finish_revert_resize
+ self._test_finish_revert_resize(power_on=False,
+ remove_old_vm_state=True)
+
+ def _test_cleanup_stored_instance_types(self, old, new, revert=False):
+ instance = self._create_fake_instance_obj()
+ migration = dict(old_instance_type_id=old,
+ new_instance_type_id=new)
+ instance.system_metadata = dict(instance_type_id=old)
+ sys_meta = dict(instance.system_metadata)
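+ # Record the flavors helper calls expected on the forward (confirm) and
+ # revert paths; the assertion below checks that the right flavor is
+ # reported as kept and the other as dropped for each path.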
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
+ self.mox.StubOutWithMock(flavors, 'save_flavor_info')
+ if revert:
+ flavors.extract_flavor(instance, 'old_').AndReturn(
+ {'instance_type_id': old})
+ flavors.extract_flavor(instance).AndReturn(
+ {'instance_type_id': new})
+ flavors.save_flavor_info(
+ sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
+ else:
+ flavors.extract_flavor(instance).AndReturn(
+ {'instance_type_id': new})
+ flavors.extract_flavor(instance, 'old_').AndReturn(
+ {'instance_type_id': old})
+ flavors.delete_flavor_info(
+ sys_meta, 'old_').AndReturn(sys_meta)
+ flavors.delete_flavor_info(
+ sys_meta, 'new_').AndReturn(sys_meta)
+
+ self.mox.ReplayAll()
+ res = self.compute._cleanup_stored_instance_types(migration, instance,
+ revert)
+ self.assertEqual(res,
+ (sys_meta,
+ {'instance_type_id': revert and old or new},
+ {'instance_type_id': revert and new or old}))
+
+ def test_cleanup_stored_instance_types_for_resize(self):
+ self._test_cleanup_stored_instance_types('1', '2')
+
+ def test_cleanup_stored_instance_types_for_resize_with_update(self):
+ self._test_cleanup_stored_instance_types('1', '2', True)
+
+ def test_cleanup_stored_instance_types_for_migration(self):
+ self._test_cleanup_stored_instance_types('1', '1')
+
+ def test_cleanup_stored_instance_types_for_migration_with_update(self):
+ self._test_cleanup_stored_instance_types('1', '1', True)
+
+ def test_get_by_flavor_id(self):
+ flavor_type = flavors.get_flavor_by_flavor_id(1)
+ self.assertEqual(flavor_type['name'], 'm1.tiny')
+
+ def test_resize_same_source_fails(self):
+ """Ensure instance fails to migrate when source and destination are
+ the same host.
+ """
+ instance = self._create_fake_instance_obj()
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.refresh()
+ instance_type = flavors.get_default_flavor()
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance_handles_migration_error(self):
+ # Ensure vm_state is ERROR when error occurs.
+ def raise_migration_failure(*args):
+ raise test.TestingException()
+ self.stubs.Set(self.compute.driver,
+ 'migrate_disk_and_power_off',
+ raise_migration_failure)
+
+ instance = self._create_fake_instance_obj()
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_type = flavors.get_default_flavor()
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = 'foo'
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=reservations,
+ request_spec={}, filter_properties={},
+ node=None)
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.assertRaises(test.TestingException, self.compute.resize_instance,
+ self.context, instance=instance,
+ migration=migration, image={},
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
+ # NOTE(comstud): error path doesn't use objects, so our object
+ # is not updated. Refresh and compare against the DB.
+ instance.refresh()
+ self.assertEqual(instance.vm_state, vm_states.ERROR)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ # Confirm that no exception is raised if the instance has no fixed IP
+ # during pre_live_migration
+ instance = self._create_fake_instance_obj()
+ c = context.get_admin_context()
+
+ self.mox.ReplayAll()
+ self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
+ {'block_device_mapping': []},
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ def test_pre_live_migration_works_correctly(self):
+ # Confirm pre_live_migration() drives the expected driver and network
+ # calls and emits the pre.start/pre.end notifications.
+ def stupid(*args, **kwargs):
+ return fake_network.fake_get_instance_nw_info(self.stubs)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ '_get_instance_nw_info', stupid)
+
+ # creating instance testdata
+ instance = self._create_fake_instance_obj({'host': 'dummy'})
+ c = context.get_admin_context()
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
+ self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
+ {'swap': None, 'ephemerals': [],
+ 'block_device_mapping': []},
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'ensure_filtering_rules_for_instance')
+ self.compute.driver.ensure_filtering_rules_for_instance(
+ mox.IsA(instance), nw_info)
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.compute.network_api.setup_networks_on_host(c, instance,
+ self.compute.host)
+
+ fake_notifier.NOTIFICATIONS = []
+ # start test
+ self.mox.ReplayAll()
+ migrate_data = {'is_shared_instance_path': False}
+ ret = self.compute.pre_live_migration(c, instance=instance,
+ block_migration=False, disk=None,
+ migrate_data=migrate_data)
+ self.assertIsNone(ret)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.pre.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.pre.end')
+
+ # cleanup
+ db.instance_destroy(c, instance['uuid'])
+
+ def test_live_migration_exception_rolls_back(self):
+ # Confirm exception when pre_live_migration fails.
+ c = context.get_admin_context()
+
+ instance = self._create_fake_instance_obj(
+ {'host': 'src_host',
+ 'task_state': task_states.MIGRATING})
+ updated_instance = self._create_fake_instance_obj(
+ {'host': 'fake-dest-host'})
+ dest_host = updated_instance['host']
+ fake_bdms = [
+ objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'vol1-id', 'source_type': 'volume',
+ 'destination_type': 'volume'})),
+ objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'vol2-id', 'source_type': 'volume',
+ 'destination_type': 'volume'}))
+ ]
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'get_instance_disk_info')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'pre_live_migration')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'remove_volume_connection')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'rollback_live_migration_at_destination')
+
+ block_device_info = {
+ 'swap': None, 'ephemerals': [], 'block_device_mapping': []}
+ self.compute.driver.get_instance_disk_info(
+ instance.name,
+ block_device_info=block_device_info).AndReturn('fake_disk')
+ self.compute.compute_rpcapi.pre_live_migration(c,
+ instance, True, 'fake_disk', dest_host,
+ {}).AndRaise(test.TestingException())
+
+ self.compute.network_api.setup_networks_on_host(c,
+ instance, self.compute.host)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(c,
+ instance.uuid).MultipleTimes().AndReturn(fake_bdms)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, instance, 'vol1-id', dest_host)
+ self.compute.compute_rpcapi.remove_volume_connection(
+ c, instance, 'vol2-id', dest_host)
+ self.compute.compute_rpcapi.rollback_live_migration_at_destination(
+ c, instance, dest_host, destroy_disks=True, migrate_data={})
+
+ # start test
+ self.mox.ReplayAll()
+ self.assertRaises(test.TestingException,
+ self.compute.live_migration,
+ c, dest=dest_host, block_migration=True,
+ instance=instance, migrate_data={})
+ instance.refresh()
+ self.assertEqual('src_host', instance.host)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+
+ def test_live_migration_works_correctly(self):
+ # Confirm live_migration() works as expected.
+ # creating instance testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj()
+ instance.host = self.compute.host
+ dest = 'desthost'
+
+ migrate_data = {'is_shared_instance_path': False}
+
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'pre_live_migration')
+ self.compute.compute_rpcapi.pre_live_migration(
+ c, instance, False, None, dest, migrate_data)
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_start')
+ migration = {'source_compute': instance['host'], 'dest_compute': dest}
+ self.compute.network_api.migrate_instance_start(c, instance,
+ migration)
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination')
+ self.compute.compute_rpcapi.post_live_migration_at_destination(
+ c, instance, False, dest)
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.compute.network_api.setup_networks_on_host(c, instance,
+ instance['host'],
+ teardown=True)
+ self.mox.StubOutWithMock(self.compute.instance_events,
+ 'clear_events_for_instance')
+ self.compute.instance_events.clear_events_for_instance(
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ ret = self.compute.live_migration(c, dest=dest,
+ instance=instance,
+ block_migration=False,
+ migrate_data=migrate_data)
+ self.assertIsNone(ret)
+
+ # cleanup
+ instance.destroy(c)
+
+ def test_post_live_migration_no_shared_storage_working_correctly(self):
+ """Confirm post_live_migration() works correctly as expected
+ for non shared storage migration.
+ """
+ # Create stubs
+ result = {}
+ # A non-shared storage live migration does not need to destroy the
+ # instance at the source, because it has already been moved to the
+ # destination, but block device and network cleanup is still needed.
+
+ def fakecleanup(*args, **kwargs):
+ result['cleanup'] = True
+
+ self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)
+ dest = 'desthost'
+ srchost = self.compute.host
+
+ # creating testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({
+ 'host': srchost,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED,
+ 'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+
+ # creating mocks
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(instance, [])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_start')
+ migration = {'source_compute': srchost, 'dest_compute': dest, }
+ self.compute.network_api.migrate_instance_start(c, instance,
+ migration)
+
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination')
+ self.compute.compute_rpcapi.post_live_migration_at_destination(
+ c, instance, False, dest)
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.compute.network_api.setup_networks_on_host(c, instance,
+ self.compute.host,
+ teardown=True)
+ self.mox.StubOutWithMock(self.compute.instance_events,
+ 'clear_events_for_instance')
+ self.compute.instance_events.clear_events_for_instance(
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+ migrate_data = {'is_shared_instance_path': False}
+ self.compute._post_live_migration(c, instance, dest,
+ migrate_data=migrate_data)
+ self.assertIn('cleanup', result)
+ self.assertEqual(result['cleanup'], True)
+
+ def test_post_live_migration_working_correctly(self):
+ # Confirm post_live_migration() works as expected.
+ dest = 'desthost'
+ srchost = self.compute.host
+
+ # creating testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({
+ 'host': srchost,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+
+ instance.update({'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+ instance.save(c)
+
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'post_live_migration'),
+ mock.patch.object(self.compute.driver, 'unfilter_instance'),
+ mock.patch.object(self.compute.network_api,
+ 'migrate_instance_start'),
+ mock.patch.object(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination'),
+ mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_source'),
+ mock.patch.object(self.compute.network_api,
+ 'setup_networks_on_host'),
+ mock.patch.object(self.compute.instance_events,
+ 'clear_events_for_instance'),
+ mock.patch.object(self.compute, 'update_available_resource')
+ ) as (
+ post_live_migration, unfilter_instance,
+ migrate_instance_start, post_live_migration_at_destination,
+ post_live_migration_at_source, setup_networks_on_host,
+ clear_events, update_available_resource
+ ):
+ self.compute._post_live_migration(c, instance, dest)
+
+ post_live_migration.assert_has_calls([
+ mock.call(c, instance, {'swap': None, 'ephemerals': [],
+ 'block_device_mapping': []}, None)])
+ unfilter_instance.assert_has_calls([mock.call(instance, [])])
+ migration = {'source_compute': srchost,
+ 'dest_compute': dest, }
+ migrate_instance_start.assert_has_calls([
+ mock.call(c, instance, migration)])
+ post_live_migration_at_destination.assert_has_calls([
+ mock.call(c, instance, False, dest)])
+ post_live_migration_at_source.assert_has_calls(
+ [mock.call(c, instance, [])])
+ setup_networks_on_host.assert_has_calls([
+ mock.call(c, instance, self.compute.host, teardown=True)])
+ clear_events.assert_called_once_with(instance)
+ update_available_resource.assert_has_calls([mock.call(c)])
+
+ def test_post_live_migration_terminate_volume_connections(self):
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({
+ 'host': self.compute.host,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ instance.update({'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+ instance.save(c)
+
+ bdms = block_device_obj.block_device_make_list(c,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'source_type': 'blank', 'guest_format': None,
+ 'destination_type': 'local'}),
+ fake_block_device.FakeDbBlockDeviceDict({
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id'}),
+ ])
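+ # Only the volume-backed BDM should lead to a cinder
+ # terminate_connection call; the local blank disk is ignored, which is
+ # what the single-call assertion at the end verifies.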
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.network_api,
+ 'migrate_instance_start'),
+ mock.patch.object(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination'),
+ mock.patch.object(self.compute.network_api,
+ 'setup_networks_on_host'),
+ mock.patch.object(self.compute.instance_events,
+ 'clear_events_for_instance'),
+ mock.patch.object(self.compute,
+ '_get_instance_block_device_info'),
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid'),
+ mock.patch.object(self.compute.driver, 'get_volume_connector'),
+ mock.patch.object(cinder.API, 'terminate_connection')
+ ) as (
+ migrate_instance_start, post_live_migration_at_destination,
+ setup_networks_on_host, clear_events_for_instance,
+ get_instance_volume_block_device_info, get_by_instance_uuid,
+ get_volume_connector, terminate_connection
+ ):
+ get_by_instance_uuid.return_value = bdms
+ get_volume_connector.return_value = 'fake-connector'
+
+ self.compute._post_live_migration(c, instance, 'dest_host')
+
+ terminate_connection.assert_called_once_with(
+ c, 'fake-volume-id', 'fake-connector')
+
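+ # The two helpers below split post_live_migration_at_destination testing:
+ # _begin records the mox expectations common to both tests, each test
+ # then records its own _get_compute_info behaviour, and _finish replays
+ # the mocks, runs the manager call and checks the notifications.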
+ def _begin_post_live_migration_at_destination(self):
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_get_compute_info')
+
+ params = {'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED, }
+ self.instance = self._create_fake_instance_obj(params)
+
+ self.admin_ctxt = context.get_admin_context()
+ self.instance = objects.Instance._from_db_object(self.context,
+ objects.Instance(),
+ db.instance_get_by_uuid(self.admin_ctxt, self.instance['uuid']))
+
+ self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+ self.instance,
+ self.compute.host)
+ migration = {'source_compute': self.instance['host'],
+ 'dest_compute': self.compute.host, }
+ self.compute.network_api.migrate_instance_finish(
+ self.admin_ctxt, self.instance, migration)
+ fake_net_info = []
+ fake_block_dev_info = {'foo': 'bar'}
+ self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
+ self.instance,
+ fake_net_info,
+ False,
+ fake_block_dev_info)
+ self.compute._get_power_state(self.admin_ctxt,
+ self.instance).AndReturn(10001)
+
+ def _finish_post_live_migration_at_destination(self):
+ self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+ mox.IgnoreArg(), self.compute.host)
+
+ fake_notifier.NOTIFICATIONS = []
+ self.mox.ReplayAll()
+
+ self.compute.post_live_migration_at_destination(self.admin_ctxt,
+ self.instance, False)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.post.dest.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.post.dest.end')
+
+ return objects.Instance.get_by_uuid(self.admin_ctxt,
+ self.instance['uuid'])
+
+ def test_post_live_migration_at_destination_with_compute_info(self):
+ """The instance's node property should be updated correctly."""
+ self._begin_post_live_migration_at_destination()
+ hypervisor_hostname = 'fake_hypervisor_hostname'
+ fake_compute_info = objects.ComputeNode(
+ hypervisor_hostname=hypervisor_hostname)
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ fake_compute_info)
+ updated = self._finish_post_live_migration_at_destination()
+ self.assertEqual(updated['node'], hypervisor_hostname)
+
+ def test_post_live_migration_at_destination_without_compute_info(self):
+ """The instance's node property should be set to None if we fail to
+ get compute_info.
+ """
+ self._begin_post_live_migration_at_destination()
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(
+ exception.NotFound())
+ updated = self._finish_post_live_migration_at_destination()
+ self.assertIsNone(updated['node'])
+
+ def test_rollback_live_migration_at_destination_correctly(self):
+ # creating instance testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({'host': 'dummy'})
+
+ fake_notifier.NOTIFICATIONS = []
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.compute.network_api.setup_networks_on_host(c, instance,
+ self.compute.host,
+ teardown=True)
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'rollback_live_migration_at_destination')
+ self.compute.driver.rollback_live_migration_at_destination(c,
+ instance, [], {'swap': None, 'ephemerals': [],
+ 'block_device_mapping': []},
+ destroy_disks=True, migrate_data=None)
+
+ # start test
+ self.mox.ReplayAll()
+ ret = self.compute.rollback_live_migration_at_destination(c,
+ instance=instance)
+ self.assertIsNone(ret)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.rollback.dest.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.rollback.dest.end')
+
+ def test_run_kill_vm(self):
+ # Detect when a vm is terminated behind the scenes.
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ instance_name = instances[0]['name']
+ self.compute.driver.test_remove_vm(instance_name)
+
+ # Force the compute manager to do its periodic poll
+ ctxt = context.get_admin_context()
+ self.compute._sync_power_states(ctxt)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After force-killing instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+ self.assertIsNone(instances[0]['task_state'])
+
+ def _fill_fault(self, values):
+ extra = dict([(x, None) for x in ['created_at',
+ 'deleted_at',
+ 'updated_at',
+ 'deleted']])
+ extra['id'] = 1
+ extra['details'] = ''
+ extra.update(values)
+ return extra
+
+ def test_add_instance_fault(self):
+ instance = self._create_fake_instance()
+ exc_info = None
+
+ def fake_db_fault_create(ctxt, values):
+ self.assertIn('raise NotImplementedError', values['details'])
+ del values['details']
+
+ expected = {
+ 'code': 500,
+ 'message': 'test',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ try:
+ raise NotImplementedError('test')
+ except NotImplementedError:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError('test'),
+ exc_info)
+
+ def test_add_instance_fault_with_remote_error(self):
+ instance = self._create_fake_instance()
+ exc_info = None
+
+ def fake_db_fault_create(ctxt, values):
+ self.assertIn('raise messaging.RemoteError', values['details'])
+ del values['details']
+
+ expected = {
+ 'code': 500,
+ 'instance_uuid': instance['uuid'],
+ 'message': 'Remote error: test My Test Message\nNone.',
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ try:
+ raise messaging.RemoteError('test', 'My Test Message')
+ except messaging.RemoteError as exc:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance, exc, exc_info)
+
+ def test_add_instance_fault_user_error(self):
+ instance = self._create_fake_instance()
+ exc_info = None
+
+ def fake_db_fault_create(ctxt, values):
+
+ expected = {
+ 'code': 400,
+ 'message': 'fake details',
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ user_exc = exception.Invalid('fake details', code=400)
+
+ try:
+ raise user_exc
+ except exception.Invalid:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance, user_exc, exc_info)
+
+ def test_add_instance_fault_no_exc_info(self):
+ instance = self._create_fake_instance()
+
+ def fake_db_fault_create(ctxt, values):
+ expected = {
+ 'code': 500,
+ 'message': 'test',
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError('test'))
+
+ def test_add_instance_fault_long_message(self):
+ instance = self._create_fake_instance()
+
+ message = 300 * 'a'
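+ # fault messages longer than 255 characters should be truncated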
+
+ def fake_db_fault_create(ctxt, values):
+ expected = {
+ 'code': 500,
+ 'message': message[:255],
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError(message))
+
+ def _test_cleanup_running(self, action):
+ admin_context = context.get_admin_context()
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+ instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+
+ self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1,
+ instance2])
+ self.flags(running_deleted_instance_timeout=3600,
+ running_deleted_instance_action=action)
+
+ return admin_context, instance1, instance2
+
+ def test_cleanup_running_deleted_instances_unrecognized_value(self):
+ admin_context = context.get_admin_context()
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+ self.flags(running_deleted_instance_action='foo-action')
+
+ with mock.patch.object(
+ self.compute, '_get_instances_on_driver',
+ return_value=[instance]):
+ try:
+ # We cannot simply use assertRaises here because the exception
+ # raised is the overly generic Exception. To be sure that the
+ # exception raised is the expected one, we check its message.
+ self.compute._cleanup_running_deleted_instances(admin_context)
+ self.fail("This point should never be reached.")
+ except Exception as e:
+ self.assertIn("Unrecognized value", six.text_type(e))
+
+ def test_cleanup_running_deleted_instances_reap(self):
+ ctxt, inst1, inst2 = self._test_cleanup_running('reap')
+ bdms = block_device_obj.block_device_make_list(ctxt, [])
+
+ self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ "get_by_instance_uuid")
+ # Simulate an error and make sure cleanup proceeds with the next
+ # instance.
+ self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
+ AndRaise(test.TestingException)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+ inst1.uuid, use_slave=True).AndReturn(bdms)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+ inst2.uuid, use_slave=True).AndReturn(bdms)
+ self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
+ AndReturn(None)
+
+ self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
+ self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
+ AndReturn(None)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown(self):
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
+ self.compute.driver.set_bootable(inst1, False)
+ self.compute.driver.power_off(inst1)
+ self.compute.driver.set_bootable(inst2, False)
+ self.compute.driver.power_off(inst2)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
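+ # a driver without set_bootable support should only trigger a
+ # warning and the cleanup should continue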
+ self.compute.driver.set_bootable(inst1, False).AndRaise(
+ NotImplementedError)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+ self.compute.driver.power_off(inst1)
+ self.compute.driver.set_bootable(inst2, False).AndRaise(
+ NotImplementedError)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+ self.compute.driver.power_off(inst2)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown_error(self):
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
+ self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
+ e = test.TestingException('bad')
+
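+ # power_off errors should be logged and must not stop cleanup of
+ # the remaining instances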
+ self.compute.driver.set_bootable(inst1, False)
+ self.compute.driver.power_off(inst1).AndRaise(e)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+
+ self.compute.driver.set_bootable(inst2, False)
+ self.compute.driver.power_off(inst2).AndRaise(e)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_running_deleted_instances(self):
+ admin_context = context.get_admin_context()
+
+ self.compute.host = 'host'
+
+ instance1 = {}
+ instance1['deleted'] = True
+ instance1['deleted_at'] = "sometimeago"
+
+ self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1])
+
+ self.mox.StubOutWithMock(timeutils, 'is_older_than')
+ timeutils.is_older_than('sometimeago',
+ CONF.running_deleted_instance_timeout).AndReturn(True)
+
+ self.mox.ReplayAll()
+ val = self.compute._running_deleted_instances(admin_context)
+ self.assertEqual(val, [instance1])
+
+ def test_get_instance_nw_info(self):
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ fake_inst = fake_instance.fake_db_instance(uuid='fake-instance')
+ fake_nw_info = network_model.NetworkInfo()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'get_instance_nw_info')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+
+ db.instance_get_by_uuid(self.context, fake_inst['uuid']
+ ).AndReturn(fake_inst)
+ # NOTE(danms): compute manager will re-query since we're not giving
+ # it an instance with system_metadata. We're stubbing out the
+ # subsequent call so we don't need it, but keep this to make sure it
+ # does the right thing.
+ db.instance_get_by_uuid(self.context, fake_inst['uuid'],
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.compute.network_api.get_instance_nw_info(self.context,
+ mox.IsA(objects.Instance)).AndReturn(fake_nw_info)
+
+ self.mox.ReplayAll()
+
+ fake_inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), fake_inst, [])
+ result = self.compute._get_instance_nw_info(self.context,
+ fake_inst_obj)
+ self.assertEqual(fake_nw_info, result)
+
+ def _heal_instance_info_cache(self, _get_instance_nw_info_raise=False):
+ # Update on every call for the test
+ self.flags(heal_instance_info_cache_interval=-1)
+ ctxt = context.get_admin_context()
+
+ instance_map = {}
+ instances = []
+ for x in xrange(8):
+ inst_uuid = 'fake-uuid-%s' % x
+ instance_map[inst_uuid] = fake_instance.fake_db_instance(
+ uuid=inst_uuid, host=CONF.host, created_at=None)
+ # These won't be in our instance since they're not requested
+ instances.append(instance_map[inst_uuid])
+
+ call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
+ 'get_nw_info': 0, 'expected_instance': None}
+
+ def fake_instance_get_all_by_host(context, host,
+ columns_to_join, use_slave=False):
+ call_info['get_all_by_host'] += 1
+ self.assertEqual([], columns_to_join)
+ return instances[:]
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join, use_slave=False):
+ if instance_uuid not in instance_map:
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ call_info['get_by_uuid'] += 1
+ self.assertEqual(['system_metadata', 'info_cache'],
+ columns_to_join)
+ return instance_map[instance_uuid]
+
+ # NOTE(comstud): Override the stub in setUp()
+ def fake_get_instance_nw_info(context, instance, use_slave=False):
+ # Note that this exception gets caught in compute/manager
+ # and is ignored. However, the below increment of
+ # 'get_nw_info' won't happen, and you'll get an assert
+ # failure checking it below.
+ self.assertEqual(call_info['expected_instance']['uuid'],
+ instance['uuid'])
+ call_info['get_nw_info'] += 1
+ if _get_instance_nw_info_raise:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(db, 'instance_get_all_by_host',
+ fake_instance_get_all_by_host)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+ self.stubs.Set(self.compute, '_get_instance_nw_info',
+ fake_get_instance_nw_info)
+
+ # Make an instance appear to be still Building
+ instances[0]['vm_state'] = vm_states.BUILDING
+ # Make an instance appear to be Deleting
+ instances[1]['task_state'] = task_states.DELETING
+ # '0' and '1' should be skipped.
+ call_info['expected_instance'] = instances[2]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(0, call_info['get_by_uuid'])
+ self.assertEqual(1, call_info['get_nw_info'])
+
+ call_info['expected_instance'] = instances[3]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(1, call_info['get_by_uuid'])
+ self.assertEqual(2, call_info['get_nw_info'])
+
+ # Make an instance switch hosts
+ instances[4]['host'] = 'not-me'
+ # Make an instance disappear
+ instance_map.pop(instances[5]['uuid'])
+ # Make an instance switch to be Deleting
+ instances[6]['task_state'] = task_states.DELETING
+ # '4', '5', and '6' should be skipped.
+ call_info['expected_instance'] = instances[7]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(4, call_info['get_by_uuid'])
+ self.assertEqual(3, call_info['get_nw_info'])
+ # There should be none left to heal.
+ self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
+
+ # This should cause a DB query now, so get a list of instances
+ # where none can be processed to make sure we handle that case
+ # cleanly. Use just '0' (Building) and '1' (Deleting)
+ instances = instances[0:2]
+
+ self.compute._heal_instance_info_cache(ctxt)
+ # Should have called the list once more
+ self.assertEqual(2, call_info['get_all_by_host'])
+ # Stays the same because we remove invalid entries from the list
+ self.assertEqual(4, call_info['get_by_uuid'])
+ # Stays the same because we didn't find anything to process
+ self.assertEqual(3, call_info['get_nw_info'])
+
+ def test_heal_instance_info_cache(self):
+ self._heal_instance_info_cache()
+
+ def test_heal_instance_info_cache_with_exception(self):
+ self._heal_instance_info_cache(_get_instance_nw_info_raise=True)
+
+ @mock.patch('nova.objects.InstanceList.get_by_filters')
+ @mock.patch('nova.compute.api.API.unrescue')
+ def test_poll_rescued_instances(self, unrescue, get):
+ timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
+ not_timed_out_time = timeutils.utcnow()
+
+ instances = [objects.Instance(uuid='fake_uuid1',
+ vm_state=vm_states.RESCUED,
+ launched_at=timed_out_time),
+ objects.Instance(uuid='fake_uuid2',
+ vm_state=vm_states.RESCUED,
+ launched_at=timed_out_time),
+ objects.Instance(uuid='fake_uuid3',
+ vm_state=vm_states.RESCUED,
+ launched_at=not_timed_out_time)]
+ unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
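+ # only the two instances launched before the rescue timeout should
+ # be unrescued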
+
+ def fake_instance_get_all_by_filters(context, filters,
+ expected_attrs=None,
+ use_slave=False):
+ self.assertEqual(["system_metadata"], expected_attrs)
+ return instances
+
+ get.side_effect = fake_instance_get_all_by_filters
+
+ def fake_unrescue(context, instance):
+ unrescued_instances[instance['uuid']] = True
+
+ unrescue.side_effect = fake_unrescue
+
+ self.flags(rescue_timeout=60)
+ ctxt = context.get_admin_context()
+
+ self.compute._poll_rescued_instances(ctxt)
+
+ for instance in unrescued_instances.values():
+ self.assertTrue(instance)
+
+ def test_poll_unconfirmed_resizes(self):
+ instances = [
+ fake_instance.fake_db_instance(uuid='fake_uuid1',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='noexist'),
+ fake_instance.fake_db_instance(uuid='fake_uuid2',
+ vm_state=vm_states.ERROR,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid3',
+ vm_state=vm_states.ACTIVE,
+ task_state=
+ task_states.REBOOTING),
+ fake_instance.fake_db_instance(uuid='fake_uuid4',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid5',
+ vm_state=vm_states.ACTIVE,
+ task_state=None),
+ # The expected migration result will be None instead of error
+ # since _poll_unconfirmed_resizes will not change it
+ # when the instance vm state is RESIZED and task state
+ # is deleting; see bug 1301696 for more details.
+ fake_instance.fake_db_instance(uuid='fake_uuid6',
+ vm_state=vm_states.RESIZED,
+ task_state='deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid7',
+ vm_state=vm_states.RESIZED,
+ task_state='soft-deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid8',
+ vm_state=vm_states.ACTIVE,
+ task_state='resize_finish')]
+ expected_migration_status = {'fake_uuid1': 'confirmed',
+ 'noexist': 'error',
+ 'fake_uuid2': 'error',
+ 'fake_uuid3': 'error',
+ 'fake_uuid4': None,
+ 'fake_uuid5': 'error',
+ 'fake_uuid6': None,
+ 'fake_uuid7': None,
+ 'fake_uuid8': None}
+ migrations = []
+ for i, instance in enumerate(instances, start=1):
+ fake_mig = test_migration.fake_db_migration()
+ fake_mig.update({'id': i,
+ 'instance_uuid': instance['uuid'],
+ 'status': None})
+ migrations.append(fake_mig)
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=None, use_slave=False):
+ self.assertIn('metadata', columns_to_join)
+ self.assertIn('system_metadata', columns_to_join)
+ # raise InstanceNotFound exception for uuid 'noexist'
+ if instance_uuid == 'noexist':
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ for instance in instances:
+ if instance['uuid'] == instance_uuid:
+ return instance
+
+ def fake_migration_get_unconfirmed_by_dest_compute(context,
+ resize_confirm_window, dest_compute, use_slave=False):
+ self.assertEqual(dest_compute, CONF.host)
+ return migrations
+
+ def fake_migration_update(context, mid, updates):
+ for migration in migrations:
+ if migration['id'] == mid:
+ migration.update(updates)
+ return migration
+
+ def fake_confirm_resize(context, instance, migration=None):
+ # raise exception for 'fake_uuid4' to check migration status
+ # does not get set to 'error' on confirm_resize failure.
+ if instance['uuid'] == 'fake_uuid4':
+ raise test.TestingException('bomb')
+ self.assertIsNotNone(migration)
+ for migration2 in migrations:
+ if (migration2['instance_uuid'] ==
+ migration['instance_uuid']):
+ migration2['status'] = 'confirmed'
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+ self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
+ fake_migration_get_unconfirmed_by_dest_compute)
+ self.stubs.Set(db, 'migration_update', fake_migration_update)
+ self.stubs.Set(self.compute.compute_api, 'confirm_resize',
+ fake_confirm_resize)
+
+ def fetch_instance_migration_status(instance_uuid):
+ for migration in migrations:
+ if migration['instance_uuid'] == instance_uuid:
+ return migration['status']
+
+ self.flags(resize_confirm_window=60)
+ ctxt = context.get_admin_context()
+
+ self.compute._poll_unconfirmed_resizes(ctxt)
+
+ for instance_uuid, status in expected_migration_status.iteritems():
+ self.assertEqual(status,
+ fetch_instance_migration_status(instance_uuid))
+
+ def test_instance_build_timeout_mixed_instances(self):
+ # Tests that instances which failed to build within the configured
+ # instance_build_timeout value are set to error state.
+ self.flags(instance_build_timeout=30)
+ ctxt = context.get_admin_context()
+ created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
+
+ filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
+ # these are the ones that are expired
+ old_instances = []
+ for x in xrange(4):
+ instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
+ instance.update(filters)
+ old_instances.append(fake_instance.fake_db_instance(**instance))
+
+ # not expired
+ instances = list(old_instances) # copy the contents of old_instances
+ new_instance = {
+ 'uuid': str(uuid.uuid4()),
+ 'created_at': timeutils.utcnow(),
+ }
+ sort_key = 'created_at'
+ sort_dir = 'desc'
+ new_instance.update(filters)
+ instances.append(fake_instance.fake_db_instance(**new_instance))
+
+ # need something to return from conductor_api.instance_update
+ # that is defined outside the for loop and can be used in the mock
+ # context
+ fake_instance_ref = {'host': CONF.host, 'node': 'fake'}
+
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(self.compute.db.sqlalchemy.api,
+ 'instance_get_all_by_filters',
+ return_value=instances),
+ mock.patch.object(self.compute.conductor_api, 'instance_update',
+ return_value=fake_instance_ref),
+ mock.patch.object(self.compute.driver, 'node_is_available',
+ return_value=False)
+ ) as (
+ instance_get_all_by_filters,
+ conductor_instance_update,
+ node_is_available
+ ):
+ # run the code
+ self.compute._check_instance_build_time(ctxt)
+ # check our assertions
+ instance_get_all_by_filters.assert_called_once_with(
+ ctxt, filters,
+ sort_key,
+ sort_dir,
+ marker=None,
+ columns_to_join=[],
+ use_slave=True,
+ limit=None)
+ self.assertThat(conductor_instance_update.mock_calls,
+ testtools_matchers.HasLength(len(old_instances)))
+ self.assertThat(node_is_available.mock_calls,
+ testtools_matchers.HasLength(len(old_instances)))
+ for inst in old_instances:
+ conductor_instance_update.assert_has_calls([
+ mock.call(ctxt, inst['uuid'],
+ vm_state=vm_states.ERROR)])
+ node_is_available.assert_has_calls([
+ mock.call(fake_instance_ref['node'])])
+
+ def test_get_resource_tracker_fail(self):
+ self.assertRaises(exception.NovaException,
+ self.compute._get_resource_tracker,
+ 'invalidnodename')
+
+ def test_instance_update_host_check(self):
+ # make sure rt usage doesn't happen if the host or node is different
+ def fail_get(nodename):
+ raise test.TestingException(_("wrong host/node"))
+ self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
+
+ instance = self._create_fake_instance({'host': 'someotherhost'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ instance = self._create_fake_instance({'node': 'someothernode'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ params = {'host': 'someotherhost', 'node': 'someothernode'}
+ instance = self._create_fake_instance(params)
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ def test_destroy_evacuated_instance_on_shared_storage(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # these are still assigned to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # this one has already been evacuated to another host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_is_instance_storage_shared')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute._is_instance_storage_shared(fake_context,
+ evacuated_instance).AndReturn(True)
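+ # storage is shared, so the evacuated instance is destroyed without
+ # touching its disks (destroy_disks=False)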
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ False)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_destroy_evacuated_instance_with_disks(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # these are still assigned to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # this one has already been evacuated to another host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_local')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'check_instance_shared_storage')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_cleanup')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute.driver.check_instance_shared_storage_local(fake_context,
+ evacuated_instance).AndReturn({'filename': 'tmpfilename'})
+ self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
+ evacuated_instance,
+ {'filename': 'tmpfilename'}).AndReturn(False)
+ self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
+ {'filename': 'tmpfilename'})
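+ # storage is not shared, so the leftover local disks are destroyed
+ # as well (destroy_disks=True)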
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ True)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_destroy_evacuated_instance_not_implemented(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # these are still assigned to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # this one has already been evacuated to another host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_local')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'check_instance_shared_storage')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_cleanup')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute.driver.check_instance_shared_storage_local(fake_context,
+ evacuated_instance).AndRaise(NotImplementedError())
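+ # the driver cannot check for shared storage, so disks are
+ # destroyed by default (destroy_disks=True)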
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ True)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_complete_partial_deletion(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance()
+ instance.id = 1
+ instance.uuid = 'fake-uuid'
+ instance.vm_state = vm_states.DELETED
+ instance.task_state = None
+ instance.system_metadata = {'fake_key': 'fake_value'}
+ instance.vcpus = 1
+ instance.memory_mb = 1
+ instance.project_id = 'fake-prj'
+ instance.user_id = 'fake-user'
+ instance.deleted = False
+
+ def fake_destroy():
+ instance.deleted = True
+
+ self.stubs.Set(instance, 'destroy', fake_destroy)
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda *a, **k: None)
+
+ self.stubs.Set(self.compute,
+ '_complete_deletion',
+ lambda *a, **k: None)
+
+ self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None)
+
+ self.compute._complete_partial_deletion(admin_context, instance)
+
+ self.assertNotEqual(0, instance.deleted)
+
+ def test_init_instance_for_partial_deletion(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance(admin_context)
+ instance.id = 1
+ instance.vm_state = vm_states.DELETED
+ instance.deleted = False
+
+ def fake_partial_deletion(context, instance):
+ instance['deleted'] = instance['id']
+
+ self.stubs.Set(self.compute,
+ '_complete_partial_deletion',
+ fake_partial_deletion)
+ self.compute._init_instance(admin_context, instance)
+
+ self.assertNotEqual(0, instance['deleted'])
+
+ def test_partial_deletion_raise_exception(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance(admin_context)
+ instance.uuid = str(uuid.uuid4())
+ instance.vm_state = vm_states.DELETED
+ instance.deleted = False
+
+ self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
+ self.compute._complete_partial_deletion(
+ admin_context, instance).AndRaise(ValueError)
+ self.mox.ReplayAll()
+
+ self.compute._init_instance(admin_context, instance)
+
+ def test_add_remove_fixed_ip_updates_instance_updated_at(self):
+ def _noop(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.network_api,
+ 'add_fixed_ip_to_instance', _noop)
+ self.stubs.Set(self.compute.network_api,
+ 'remove_fixed_ip_from_instance', _noop)
+
+ instance = self._create_fake_instance_obj()
+ updated_at_1 = instance['updated_at']
+
+ self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
+ updated_at_2 = db.instance_get_by_uuid(self.context,
+ instance['uuid'])['updated_at']
+
+ self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
+ instance)
+ updated_at_3 = db.instance_get_by_uuid(self.context,
+ instance['uuid'])['updated_at']
+
+ updated_ats = (updated_at_1, updated_at_2, updated_at_3)
+ self.assertEqual(len(updated_ats), len(set(updated_ats)))
+
+ def test_no_pending_deletes_for_soft_deleted_instances(self):
+ self.flags(reclaim_instance_interval=0)
+ ctxt = context.get_admin_context()
+
+ instance = self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ self.compute._run_pending_deletes(ctxt)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertFalse(instance['cleaned'])
+
+ def test_reclaim_queued_deletes(self):
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ # Active
+ self._create_fake_instance(params={'host': CONF.host})
+
+ # Deleted not old enough
+ self._create_fake_instance(params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ # Deleted old enough (only this one should be reclaimed)
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+
+ # Restoring
+ # NOTE(hanlind): This specifically tests for a race condition
+ # where restoring a previously soft deleted instance sets
+ # deleted_at back to None, causing reclaim to think it can be
+ # deleted, see LP #1186243.
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': task_states.RESTORING})
+
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ self.compute._delete_instance(
+ ctxt, mox.IsA(objects.Instance), [],
+ mox.IsA(objects.Quotas))
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_reclaim_queued_deletes_continue_on_error(self):
+ # Verify that reclaim continues on error.
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance1 = self._create_fake_instance_obj(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+ instance2 = self._create_fake_instance_obj(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+ instances = []
+ instances.append(instance1)
+ instances.append(instance2)
+
+ self.mox.StubOutWithMock(objects.InstanceList,
+ 'get_by_filters')
+ self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+
+ objects.InstanceList.get_by_filters(
+ ctxt, mox.IgnoreArg(),
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
+ use_slave=True
+ ).AndReturn(instances)
+
+ # The first instance delete fails.
+ self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ ctxt, instance1.uuid).AndReturn([])
+ self.compute._delete_instance(ctxt, instance1,
+ [], self.none_quotas).AndRaise(
+ test.TestingException)
+
+ # The second instance delete should still proceed.
+ self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ ctxt, instance2.uuid).AndReturn([])
+ self.compute._delete_instance(ctxt, instance2,
+ [], self.none_quotas)
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_sync_power_states(self):
+ ctxt = self.context.elevated()
+ self._create_fake_instance({'host': self.compute.host})
+ self._create_fake_instance({'host': self.compute.host})
+ self._create_fake_instance({'host': self.compute.host})
+ self.mox.StubOutWithMock(self.compute.driver, 'get_info')
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+
+ # Check to make sure task continues on error.
+ self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
+ exception.InstanceNotFound(instance_id='fake-uuid'))
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.NOSTATE).AndRaise(
+ exception.InstanceNotFound(instance_id='fake-uuid'))
+
+ self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
+ {'state': power_state.RUNNING})
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.RUNNING,
+ use_slave=True)
+ self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
+ {'state': power_state.SHUTDOWN})
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.SHUTDOWN,
+ use_slave=True)
+ self.mox.ReplayAll()
+ self.compute._sync_power_states(ctxt)
+
+ def _test_lifecycle_event(self, lifecycle_event, power_state):
+ instance = self._create_fake_instance()
+ uuid = instance['uuid']
+
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+ if power_state is not None:
+ self.compute._sync_instance_power_state(
+ mox.IgnoreArg(),
+ mox.ContainsKeyValue('uuid', uuid),
+ power_state)
+ self.mox.ReplayAll()
+ self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_lifecycle_events(self):
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
+ power_state.SHUTDOWN)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
+ power_state.PAUSED)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(-1, None)
+
+ def test_lifecycle_event_non_existent_instance(self):
+ # No error raised for non-existent instance because of inherent race
+ # between database updates and hypervisor events. See bug #1180501.
+ event_instance = event.LifecycleEvent('does-not-exist',
+ event.EVENT_LIFECYCLE_STOPPED)
+ self.compute.handle_events(event_instance)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_migration_not_found(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.id = 0
+
+ mock_get_by_id.side_effect = exception.MigrationNotFound(
+ migration_id=0)
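+ # a missing migration record should make confirm_resize roll back
+ # the quota reservation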
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(instance_obj.Instance, 'get_by_uuid')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_instance_not_found(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.id = 0
+
+ mock_get_by_id.side_effect = exception.InstanceNotFound(
+ instance_id=instance.uuid)
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_status_confirmed(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'confirmed'
+ migration.id = 0
+
+ mock_get_by_id.return_value = migration
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_status_dummy(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'dummy'
+ migration.id = 0
+
+ mock_get_by_id.return_value = migration
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
+ instance = self._create_fake_instance_obj()
+ old_type = flavors.extract_flavor(instance)
+ new_type = flavors.get_flavor_by_flavor_id('4')
+ sys_meta = instance.system_metadata
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ old_type, 'old_')
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ new_type, 'new_')
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ new_type)
+
+ fake_rt = self.mox.CreateMockAnything()
+
+ def fake_drop_resize_claim(*args, **kwargs):
+ pass
+
+ def fake_get_resource_tracker(self):
+ return fake_rt
+
+ def fake_setup_networks_on_host(self, *args, **kwargs):
+ pass
+
+ self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
+ self.stubs.Set(self.compute, '_get_resource_tracker',
+ fake_get_resource_tracker)
+ self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
+ fake_setup_networks_on_host)
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.create(self.context.elevated())
+
+ instance.task_state = task_states.DELETING
+ instance.vm_state = vm_states.RESIZED
+ instance.system_metadata = sys_meta
+ instance.save()
+
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ instance.refresh()
+ self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
+
+ def _get_instance_and_bdm_for_dev_defaults_tests(self):
+ instance = self._create_fake_instance_obj(
+ params={'root_device_name': '/dev/vda'})
+ block_device_mapping = block_device_obj.block_device_make_list(
+ self.context, [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0})])
+
+ return instance, block_device_mapping
+
+ def test_default_block_device_names_empty_instance_root_dev(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ instance.root_device_name = None
+ self.mox.StubOutWithMock(objects.Instance, 'save')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
+
+ def test_default_block_device_names_empty_root_device(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ bdms[0]['device_name'] = None
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
+ bdms[0].save().AndReturn(None)
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+
+ def test_default_block_device_names_no_root_device(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ instance.root_device_name = None
+ bdms[0]['device_name'] = None
+ self.mox.StubOutWithMock(objects.Instance, 'save')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_root_device_name')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+
+ self.compute._default_root_device_name(instance, mox.IgnoreArg(),
+ bdms[0]).AndReturn('/dev/vda')
+ bdms[0].save().AndReturn(None)
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
+
+ def test_default_block_device_names_with_blank_volumes(self):
+ instance = self._create_fake_instance_obj()
+ image_meta = {}
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1, 'instance_uuid': 'fake-instance',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 2, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ ephemeral = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 4, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local'}))
+ swap = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 5, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap'
+ }))
+ bdms = block_device_obj.block_device_make_list(
+ self.context, [root_volume, blank_volume1, blank_volume2,
+ ephemeral, swap])
+
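+ # the root and blank volumes should get default device names;
+ # ephemeral and swap are handed over separately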
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_default_root_device_name',
+ return_value='/dev/vda'),
+ mock.patch.object(objects.BlockDeviceMapping, 'save'),
+ mock.patch.object(self.compute,
+ '_default_device_names_for_instance')
+ ) as (default_root_device, object_save,
+ default_device_names):
+ self.compute._default_block_device_names(self.context, instance,
+ image_meta, bdms)
+ default_root_device.assert_called_once_with(instance, image_meta,
+ bdms[0])
+ self.assertEqual('/dev/vda', instance.root_device_name)
+ self.assertTrue(object_save.called)
+ default_device_names.assert_called_once_with(instance,
+ '/dev/vda', [bdms[-2]], [bdms[-1]],
+ [bdm for bdm in bdms[:-2]])
+
+ def test_reserve_block_device_name(self):
+ instance = self._create_fake_instance_obj(
+ params={'root_device_name': '/dev/vda'})
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'image', 'destination_type': 'local',
+ 'image_id': 'fake-image-id', 'device_name': '/dev/vda',
+ 'instance_uuid': instance.uuid})
+ bdm.create(self.context)
+
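+ # reserving /dev/vdb should add a second, volume-type BDM next to
+ # the existing image BDM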
+ self.compute.reserve_block_device_name(self.context, instance,
+ '/dev/vdb', 'fake-volume-id',
+ 'virtio', 'disk')
+
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, instance.uuid)
+ bdms = list(bdms)
+ self.assertEqual(len(bdms), 2)
+ bdms.sort(key=operator.attrgetter('device_name'))
+ vol_bdm = bdms[1]
+ self.assertEqual(vol_bdm.source_type, 'volume')
+ self.assertEqual(vol_bdm.destination_type, 'volume')
+ self.assertEqual(vol_bdm.device_name, '/dev/vdb')
+ self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
+ self.assertEqual(vol_bdm.disk_bus, 'virtio')
+ self.assertEqual(vol_bdm.device_type, 'disk')
+
+
+class ComputeAPITestCase(BaseTestCase):
+ def setUp(self):
+ def fake_get_nw_info(cls, ctxt, instance):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ super(ComputeAPITestCase, self).setUp()
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+
+ self.compute_api = compute.API(
+ security_group_api=self.security_group_api)
+ self.fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def fake_show(obj, context, image_id, **kwargs):
+ if image_id:
+ return self.fake_image
+ else:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ self.fake_show = fake_show
+
+ def _run_instance(self, params=None):
+ instance = self._create_fake_instance_obj(params, services=True)
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instance.refresh()
+ self.assertIsNone(instance['task_state'])
+ return instance, instance_uuid
+
+ def test_ip_filtering(self):
+ info = [{
+ 'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{
+ 'cidr': '192.168.0.0/24',
+ 'ips': [{
+ 'address': '192.168.0.10',
+ 'type': 'fixed',
+ }]
+ }]
+ }
+ }]
+
+ info1 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
+ inst1 = objects.Instance(id=1, info_cache=info1)
+ info[0]['network']['subnets'][0]['ips'][0]['address'] = '192.168.0.20'
+ info2 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
+ inst2 = objects.Instance(id=2, info_cache=info2)
+ instances = objects.InstanceList(objects=[inst1, inst2])
+
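+ # only inst1 has a fixed IP matching the '.*10' regex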
+ instances = self.compute_api._ip_filter(instances, {'ip': '.*10'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, 1)
+
+ def test_create_with_too_little_ram(self):
+ # Test an instance type with too little memory.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['memory_mb'] = 1
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Now increase the inst_type memory and make sure all is fine.
+ inst_type['memory_mb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_too_little_disk(self):
+ # Test an instance type with too little disk space.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+
+ self.fake_image['min_disk'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Now increase the inst_type disk space and make sure all is fine.
+ inst_type['root_gb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_too_large_image(self):
+ # Test an image whose size exceeds the instance type's root disk.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+
+ self.fake_image['size'] = '1073741825'
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Reduce image to 1 GB limit and ensure it works
+ self.fake_image['size'] = '1073741824'
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_just_enough_ram_and_disk(self):
+ # Test an instance type with just enough ram and disk space.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 2
+ inst_type['memory_mb'] = 2
+
+ self.fake_image['min_ram'] = 2
+ self.fake_image['min_disk'] = 2
+ self.fake_image['name'] = 'fake_name'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_no_ram_and_disk_reqs(self):
+ # Test an instance type with no min_ram or min_disk.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+ inst_type['memory_mb'] = 1
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_deleted_image(self):
+ # If we're given a deleted image by glance, we should not be able to
+ # build from it
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['name'] = 'fake_name'
+ self.fake_image['status'] = 'DELETED'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ expected_message = (
+ exception.ImageNotActive.msg_fmt % {'image_id':
+ self.fake_image['id']})
+ with testtools.ExpectedException(exception.ImageNotActive,
+ expected_message):
+ self.compute_api.create(self.context, inst_type,
+ self.fake_image['id'])
+
+ @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
+ def test_create_with_numa_topology(self, numa_constraints_mock):
+ inst_type = flavors.get_default_flavor()
+ # This is what the stubbed out method will return
+ fake_image_props = {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}
+
+ numa_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3, 4]), 512)])
+ numa_constraints_mock.return_value = numa_topology
+
+ instances, resv_id = self.compute_api.create(self.context, inst_type,
+ self.fake_image['id'])
+ numa_constraints_mock.assert_called_once_with(
+ inst_type, fake_image_props)
+ self.assertThat(numa_topology._to_dict(),
+ matchers.DictMatches(
+ instances[0].numa_topology
+ .topology_from_obj()._to_dict()))
+
+ def test_create_instance_defaults_display_name(self):
+ # Verify that a default display_name is set when none is provided.
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(),
+ 'fake-image-uuid', **instance)
+ try:
+ self.assertIsNotNone(ref[0]['display_name'])
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_sets_system_metadata(self):
+ # Make sure image properties are copied into system metadata.
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='fake-image-uuid')
+ try:
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ ref[0]['uuid'])
+
+ image_props = {'image_kernel_id': 'fake_kernel_id',
+ 'image_ramdisk_id': 'fake_ramdisk_id',
+ 'image_something_else': 'meow', }
+ for key, value in image_props.iteritems():
+ self.assertIn(key, sys_metadata)
+ self.assertEqual(value, sys_metadata[key])
+
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_saves_type_in_system_metadata(self):
+ instance_type = flavors.get_default_flavor()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_type,
+ image_href='some-fake-image')
+ try:
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ ref[0]['uuid'])
+
+ instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
+ 'ephemeral_gb', 'flavorid', 'swap',
+ 'rxtx_factor', 'vcpu_weight']
+ for key in instance_type_props:
+ sys_meta_key = "instance_type_%s" % key
+ self.assertIn(sys_meta_key, sys_metadata)
+ self.assertEqual(str(instance_type[key]),
+ str(sys_metadata[sys_meta_key]))
+
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_associates_security_groups(self):
+ # Make sure create associates security groups.
+ group = self._create_group()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+ try:
+ self.assertEqual(len(db.security_group_get_by_instance(
+ self.context, ref[0]['uuid'])), 1)
+ group = db.security_group_get(self.context, group['id'])
+ self.assertEqual(1, len(group['instances']))
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_with_invalid_security_group_raises(self):
+ instance_type = flavors.get_default_flavor()
+
+ pre_build_len = len(db.instance_get_all(self.context))
+ self.assertRaises(exception.SecurityGroupNotFoundForProject,
+ self.compute_api.create,
+ self.context,
+ instance_type=instance_type,
+ image_href=None,
+ security_group=['this_is_a_fake_sec_group'])
+ self.assertEqual(pre_build_len,
+ len(db.instance_get_all(self.context)))
+
+ def test_create_with_large_user_data(self):
+ # Test an instance type with too much user data.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.InstanceUserDataTooLarge,
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data=('1' * 65536))
+
+ def test_create_with_malformed_user_data(self):
+ # Test an instance type with malformed user data.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.InstanceUserDataMalformed,
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data='banana')
+
+ def test_create_with_base64_user_data(self):
+ # Test create with user data small enough to fit once base64-encoded.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ # NOTE(mikal): a string of length 48510 encodes to 65532 characters of
+ # base64
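+ # Sanity check on that arithmetic (not part of the original note):
+ # base64 turns every 3 input bytes into 4 output characters, so
+ # 48510 / 3 * 4 = 64680 characters, and encodestring() adds a newline
+ # after every 76-character line (852 of them), giving 65532 characters,
+ # just under the 65536-byte payload shown to be too large in
+ # test_create_with_large_user_data above.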
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ user_data=base64.encodestring('1' * 48510))
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_populate_instance_for_create(self):
+ base_options = {'image_ref': self.fake_image['id'],
+ 'system_metadata': {'fake': 'value'}}
+ instance = objects.Instance()
+ instance.update(base_options)
+ inst_type = flavors.get_flavor_by_name("m1.tiny")
+ instance = self.compute_api._populate_instance_for_create(
+ self.context,
+ instance,
+ self.fake_image,
+ 1,
+ security_groups=None,
+ instance_type=inst_type)
+ self.assertEqual(str(base_options['image_ref']),
+ instance['system_metadata']['image_base_image_ref'])
+ self.assertEqual(vm_states.BUILDING, instance['vm_state'])
+ self.assertEqual(task_states.SCHEDULING, instance['task_state'])
+ self.assertEqual(1, instance['launch_index'])
+ self.assertIsNotNone(instance.get('uuid'))
+ self.assertEqual([], instance.security_groups.objects)
+
+ def test_default_hostname_generator(self):
+ fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
+
+ orig_populate = self.compute_api._populate_instance_for_create
+
+ def _fake_populate(context, base_options, *args, **kwargs):
+ base_options['uuid'] = fake_uuids.pop(0)
+ return orig_populate(context, base_options, *args, **kwargs)
+
+ self.stubs.Set(self.compute_api,
+ '_populate_instance_for_create',
+ _fake_populate)
+
+ cases = [(None, 'server-%s' % fake_uuids[0]),
+ ('Hello, Server!', 'hello-server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
+ ('hello_server', 'hello-server')]
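+ # As the cases above suggest, the hostname appears to be derived from
+ # the display name by lowercasing it, mapping spaces and underscores to
+ # hyphens and dropping other non-alphanumeric characters; with no
+ # display name the generator falls back to 'server-<uuid>'.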
+ for display_name, hostname in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ display_name=display_name)
+ try:
+ self.assertEqual(ref[0]['hostname'], hostname)
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_instance_create_adds_to_instance_group(self):
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ group = objects.InstanceGroup(self.context)
+ group.uuid = str(uuid.uuid4())
+ group.create()
+
+ inst_type = flavors.get_default_flavor()
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': group.uuid})
+
+ group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
+ self.assertIn(refs[0]['uuid'], group.members)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_instance_create_auto_creates_group(self):
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ inst_type = flavors.get_default_flavor()
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': 'groupname'})
+
+ group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
+ self.assertEqual('groupname', group.name)
+ self.assertIn('legacy', group.policies)
+ self.assertEqual(1, len(group.members))
+ self.assertIn(refs[0]['uuid'], group.members)
+
+ # On a second instance, make sure it gets added to the group that was
+ # auto-created above
+ (refs2, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': 'groupname'})
+ group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
+ self.assertEqual('groupname', group.name)
+ self.assertIn('legacy', group.policies)
+ self.assertEqual(2, len(group.members))
+ self.assertIn(refs[0]['uuid'], group.members)
+ self.assertIn(refs2[0]['uuid'], group.members)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_destroy_instance_disassociates_security_groups(self):
+ # Make sure destroying disassociates security groups.
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+ try:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+ group = db.security_group_get(self.context, group['id'])
+ self.assertEqual(0, len(group['instances']))
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+
+ def test_destroy_security_group_disassociates_instances(self):
+ # Make sure destroying security groups disassociates instances.
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+
+ try:
+ db.security_group_destroy(self.context, group['id'])
+ admin_deleted_context = context.get_admin_context(
+ read_deleted="only")
+ group = db.security_group_get(admin_deleted_context, group['id'])
+ self.assertEqual(0, len(group['instances']))
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def _test_rebuild(self, vm_state):
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instance = objects.Instance.get_by_uuid(self.context,
+ instance_uuid)
+ self.assertIsNone(instance.task_state)
+ # Set some image metadata that should get wiped out and reset
+ # as well as some other metadata that should be preserved.
+ instance.system_metadata.update({
+ 'image_kernel_id': 'old-data',
+ 'image_ramdisk_id': 'old_data',
+ 'image_something_else': 'old-data',
+ 'image_should_remove': 'bye-bye',
+ 'preserved': 'preserve this!'})
+
+ instance.save()
+
+ # Make sure Compute API updates the image_ref before casting to
+ # compute manager.
+ info = {'image_ref': None, 'clean': False}
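+ # 'clean' records whether the instance object had no unsaved changes
+ # left at the moment of the cast, i.e. the API saved everything first.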
+
+ def fake_rpc_rebuild(context, **kwargs):
+ info['image_ref'] = kwargs['instance'].image_ref
+ info['clean'] = kwargs['instance'].obj_what_changed() == set()
+
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
+ fake_rpc_rebuild)
+
+ image_ref = instance["image_ref"] + '-new_image_ref'
+ password = "new_password"
+
+ instance.vm_state = vm_state
+ instance.save()
+
+ self.compute_api.rebuild(self.context, instance, image_ref, password)
+ self.assertEqual(info['image_ref'], image_ref)
+ self.assertTrue(info['clean'])
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.REBUILDING)
+ sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
+ if not k.startswith('instance_type')])
+ self.assertEqual(sys_meta,
+ {'image_kernel_id': 'fake_kernel_id',
+ 'image_min_disk': '1',
+ 'image_ramdisk_id': 'fake_ramdisk_id',
+ 'image_something_else': 'meow',
+ 'preserved': 'preserve this!'})
+ instance.destroy()
+
+ def test_rebuild(self):
+ self._test_rebuild(vm_state=vm_states.ACTIVE)
+
+ def test_rebuild_in_error_state(self):
+ self._test_rebuild(vm_state=vm_states.ERROR)
+
+ def test_rebuild_in_error_not_launched(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': ''})
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_states.ERROR,
+ "launched_at": None})
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ instance['image_ref'],
+ "new password")
+
+ def test_rebuild_no_image(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': ''})
+ instance_uuid = instance.uuid
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+ self.compute_api.rebuild(self.context, instance, '', 'new_password')
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
+
+ def test_rebuild_with_deleted_image(self):
+ # If we're given a deleted image by glance, we should not be able to
+ # rebuild from it
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+ self.fake_image['name'] = 'fake_name'
+ self.fake_image['status'] = 'DELETED'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ expected_message = (
+ exception.ImageNotActive.msg_fmt % {'image_id':
+ self.fake_image['id']})
+ with testtools.ExpectedException(exception.ImageNotActive,
+ expected_message):
+ self.compute_api.rebuild(self.context, instance,
+ self.fake_image['id'], 'new_password')
+
+ def test_rebuild_with_too_little_ram(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_ram'] = 128
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image memory requirements and make sure it works
+ self.fake_image['min_ram'] = 64
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_too_little_disk(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_disk'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image disk requirements and make sure it works
+ self.fake_image['min_disk'] = 1
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_just_enough_ram_and_disk(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_ram'] = 64
+ self.fake_image['min_disk'] = 1
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_no_ram_and_disk_reqs(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_too_large_image(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['size'] = '1073741825'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image to 1 GB limit and ensure it works
+ self.fake_image['size'] = '1073741824'
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_hostname_create(self):
+ # Ensure instance hostname is set during creation.
+ inst_type = flavors.get_flavor_by_name('m1.tiny')
+ (instances, _) = self.compute_api.create(self.context,
+ inst_type,
+ image_href='some-fake-image',
+ display_name='test host')
+
+ self.assertEqual('test-host', instances[0]['hostname'])
+
+ def _fake_rescue_block_devices(self, instance, status="in-use"):
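+ """Return a fake volume-backed BDM list and a matching volume dict
+ for the rescue tests below.
+ """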
+ fake_bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'boot_index': 0,
+ 'destination_type': 'volume',
+ 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])
+
+ volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
+ 'state': 'active', 'instance_uuid': instance['uuid']}
+
+ return fake_bdms, volume
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ @mock.patch.object(cinder.API, 'get')
+ def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms):
+ # Instance started without an image
+ params = {'image_ref': ''}
+ volume_backed_inst_1 = self._create_fake_instance_obj(params=params)
+ bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)
+
+ mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
+ mock_get_bdms.return_value = bdms
+
+ with mock.patch.object(self.compute, '_prep_block_device'):
+ self.compute.run_instance(self.context,
+ volume_backed_inst_1, {}, {}, None, None,
+ None, True, None, False)
+
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context,
+ volume_backed_inst_1)
+
+ self.compute.terminate_instance(self.context, volume_backed_inst_1,
+ [], [])
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ @mock.patch.object(cinder.API, 'get')
+ def test_rescue_volume_backed_placeholder_image(self,
+ mock_get_vol,
+ mock_get_bdms):
+ # Instance started with a placeholder image (for metadata)
+ volume_backed_inst_2 = self._create_fake_instance_obj(
+ {'image_ref': 'my_placeholder_img',
+ 'root_device_name': '/dev/vda'})
+ bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)
+
+ mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
+ mock_get_bdms.return_value = bdms
+
+ with mock.patch.object(self.compute, '_prep_block_device'):
+ self.compute.run_instance(self.context,
+ volume_backed_inst_2, {}, {}, None, None,
+ None, True, None, False)
+
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context,
+ volume_backed_inst_2)
+
+ self.compute.terminate_instance(self.context, volume_backed_inst_2,
+ [], [])
+
+ def test_get(self):
+ # Test get instance.
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ self.context, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(_context, _instance_uuid,
+ columns_to_join=None, use_slave=False):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
+
+ instance = self.compute_api.get(self.context, exp_instance['uuid'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_with_admin_context(self):
+ # Test get instance.
+ c = context.get_admin_context()
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ c, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(context, instance_uuid,
+ columns_to_join=None, use_slave=False):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
+
+ instance = self.compute_api.get(c, exp_instance['uuid'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_with_integer_id(self):
+ # Test get instance with an integer id.
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ self.context, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(_context, _instance_id, columns_to_join=None):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get', fake_db_get)
+
+ instance = self.compute_api.get(self.context, exp_instance['id'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_all_by_name_regexp(self):
+ # Test searching instances by name (display_name).
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'display_name': 'woot'})
+ instance2 = self._create_fake_instance({
+ 'display_name': 'woo'})
+ instance3 = self._create_fake_instance({
+ 'display_name': 'not-woot'})
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^woo.*'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance1['uuid'], instance_uuids)
+ self.assertIn(instance2['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^woot.*'})
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertEqual(len(instances), 1)
+ self.assertIn(instance1['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '.*oot.*'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance1['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^n.*'})
+ self.assertEqual(len(instances), 1)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': 'noth.*'})
+ self.assertEqual(len(instances), 0)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_multiple_options_at_once(self):
+ # Test searching by multiple options at once.
+ c = context.get_admin_context()
+
+ def fake_network_info(ip):
+ info = [{
+ 'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{
+ 'cidr': '192.168.0.0/24',
+ 'ips': [{
+ 'address': ip,
+ 'type': 'fixed',
+ }]
+ }]
+ }
+ }]
+ return jsonutils.dumps(info)
+
+ instance1 = self._create_fake_instance({
+ 'display_name': 'woot',
+ 'id': 1,
+ 'uuid': '00000000-0000-0000-0000-000000000010',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.1')}})
+ instance2 = self._create_fake_instance({
+ 'display_name': 'woo',
+ 'id': 20,
+ 'uuid': '00000000-0000-0000-0000-000000000020',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.2')}})
+ instance3 = self._create_fake_instance({
+ 'display_name': 'not-woot',
+ 'id': 30,
+ 'uuid': '00000000-0000-0000-0000-000000000030',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.3')}})
+
+ # The ip regex matches on the second octet here, so all 3 instances
+ # match on ip, but 'name' only matches instance 3
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1', 'name': 'not.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance3['uuid'])
+
+ # The ip regex matches any address with a '1' in the last octet, so
+ # instances 1 and 3 match on ip, but 'name' only matches instance 1
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ # Same as above but with no overall match: 'name' matches instance 1
+ # while the ip query does not
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
+ self.assertEqual(len(instances), 0)
+
+ # ip matches all 3, ip6 matches instances 2 and 3, and name matches
+ # only instance 3
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1',
+ 'name': 'not.*',
+ 'ip6': '^.*12.*34.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance3['uuid'])
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_image(self):
+ # Test searching instances by image.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'image_ref': '1234'})
+ instance2 = self._create_fake_instance({'image_ref': '4567'})
+ instance3 = self._create_fake_instance({'image_ref': '4567'})
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '123'})
+ self.assertEqual(len(instances), 0)
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ # Test passing a list as search arg
+ instances = self.compute_api.get_all(c,
+ search_opts={'image': ['1234', '4567']})
+ self.assertEqual(len(instances), 3)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_flavor(self):
+ # Test searching instances by flavor.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'instance_type_id': 1})
+ instance2 = self._create_fake_instance({'instance_type_id': 2})
+ instance3 = self._create_fake_instance({'instance_type_id': 2})
+
+ # NOTE(comstud): Migrations set up the instance_types table
+ # for us. Therefore, we assume the following is true for
+ # these tests:
+ # instance_type_id 1 == flavor 3
+ # instance_type_id 2 == flavor 1
+ # instance_type_id 3 == flavor 4
+ # instance_type_id 4 == flavor 5
+ # instance_type_id 5 == flavor 2
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'flavor': 5})
+ self.assertEqual(len(instances), 0)
+
+ # ensure unknown filter maps to an exception
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.get_all, c,
+ search_opts={'flavor': 99})
+
+ instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['id'], instance1['id'])
+
+ instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_state(self):
+ # Test searching instances by power state.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
+ instance2 = self._create_fake_instance({
+ 'power_state': power_state.RUNNING,
+ })
+ instance3 = self._create_fake_instance({
+ 'power_state': power_state.RUNNING,
+ })
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.SUSPENDED})
+ self.assertEqual(len(instances), 0)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.SHUTDOWN})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.RUNNING})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ # Test passing a list as search arg
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': [power_state.SHUTDOWN,
+ power_state.RUNNING]})
+ self.assertEqual(len(instances), 3)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_metadata(self):
+ # Test searching instances by metadata.
+
+ c = context.get_admin_context()
+ instance0 = self._create_fake_instance()
+ instance1 = self._create_fake_instance({
+ 'metadata': {'key1': 'value1'}})
+ instance2 = self._create_fake_instance({
+ 'metadata': {'key2': 'value2'}})
+ instance3 = self._create_fake_instance({
+ 'metadata': {'key3': 'value3'}})
+ instance4 = self._create_fake_instance({
+ 'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+
+ # get all instances
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u"{}"})
+ self.assertEqual(len(instances), 5)
+
+ # wrong key/value combination
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key1": "value3"}'})
+ self.assertEqual(len(instances), 0)
+
+ # non-existing keys
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key5": "value1"}'})
+ self.assertEqual(len(instances), 0)
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key2": "value2"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance2['uuid'])
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key3": "value3"}'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance3['uuid'], instance_uuids)
+ self.assertIn(instance4['uuid'], instance_uuids)
+
+ # multiple criteria as a dict
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance4['uuid'])
+
+ # multiple criteria as a list
+ instances = self.compute_api.get_all(c,
+ search_opts=
+ {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance4['uuid'])
+
+ db.instance_destroy(c, instance0['uuid'])
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+ db.instance_destroy(c, instance4['uuid'])
+
+ def test_get_all_by_system_metadata(self):
+ # Test searching instances by system metadata.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({
+ 'system_metadata': {'key1': 'value1'}})
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'system_metadata': u'{"key1": "value1"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ def test_all_instance_metadata(self):
+ self._create_fake_instance({'metadata': {'key1': 'value1'},
+ 'user_id': 'user1',
+ 'project_id': 'project1'})
+
+ self._create_fake_instance({'metadata': {'key2': 'value2'},
+ 'user_id': 'user2',
+ 'project_id': 'project2'})
+
+ _context = self.context
+ _context.user_id = 'user1'
+ _context.project_id = 'project1'
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(1, len(metadata))
+ self.assertEqual(metadata[0]['key'], 'key1')
+
+ _context.user_id = 'user2'
+ _context.project_id = 'project2'
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(1, len(metadata))
+ self.assertEqual(metadata[0]['key'], 'key2')
+
+ _context = context.get_admin_context()
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(2, len(metadata))
+
+ def test_instance_metadata(self):
+ meta_changes = [None]
+ self.flags(notify_on_state_change='vm_state')
+
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ meta_changes[0] = diff
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ _context = context.get_admin_context()
+ instance = self._create_fake_instance_obj({'metadata':
+ {'key1': 'value1'}})
+
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key1': 'value1'})
+
+ self.compute_api.update_instance_metadata(_context, instance,
+ {'key2': 'value2'})
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
+ self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ payload = msg.payload
+ self.assertIn('metadata', payload)
+ self.assertEqual(payload['metadata'], metadata)
+
+ new_metadata = {'key2': 'bah', 'key3': 'value3'}
+ self.compute_api.update_instance_metadata(_context, instance,
+ new_metadata, delete=True)
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, new_metadata)
+ self.assertEqual(meta_changes, [{
+ 'key1': ['-'],
+ 'key2': ['+', 'bah'],
+ 'key3': ['+', 'value3'],
+ }])
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[1]
+ payload = msg.payload
+ self.assertIn('metadata', payload)
+ self.assertEqual(payload['metadata'], metadata)
+
+ self.compute_api.delete_instance_metadata(_context, instance, 'key2')
+ metadata = self.compute_api.get_instance_metadata(_context, instance)
+ self.assertEqual(metadata, {'key3': 'value3'})
+ self.assertEqual(meta_changes, [{'key2': ['-']}])
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
+ msg = fake_notifier.NOTIFICATIONS[2]
+ payload = msg.payload
+ self.assertIn('metadata', payload)
+ self.assertEqual(payload['metadata'], {'key3': 'value3'})
+
+ db.instance_destroy(_context, instance['uuid'])
+
+ def test_disallow_metadata_changes_during_building(self):
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ pass
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
+ instance = dict(instance)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.delete_instance_metadata, self.context,
+ instance, "key")
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.update_instance_metadata, self.context,
+ instance, "key")
+
+ def test_get_instance_faults(self):
+ # Get an instance's latest fault.
+ instance = self._create_fake_instance()
+
+ fault_fixture = {
+ 'code': 404,
+ 'instance_uuid': instance['uuid'],
+ 'message': "HTTPNotFound",
+ 'details': "Stock details for test",
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ }
+
+ def return_fault(_ctxt, instance_uuids):
+ return dict.fromkeys(instance_uuids, [fault_fixture])
+
+ self.stubs.Set(nova.db,
+ 'instance_fault_get_by_instance_uuids',
+ return_fault)
+
+ _context = context.get_admin_context()
+ output = self.compute_api.get_instance_faults(_context, [instance])
+ expected = {instance['uuid']: [fault_fixture]}
+ self.assertEqual(output, expected)
+
+ db.instance_destroy(_context, instance['uuid'])
+
+ @staticmethod
+ def _parse_db_block_device_mapping(bdm_ref):
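+ """Return a dict containing only the BDM attributes from bdm_ref
+ that are set to a truthy value.
+ """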
+ attr_list = ('delete_on_termination', 'device_name', 'no_device',
+ 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
+ bdm = {}
+ for attr in attr_list:
+ val = bdm_ref.get(attr, None)
+ if val:
+ bdm[attr] = val
+
+ return bdm
+
+ def test_update_block_device_mapping(self):
+ swap_size = ephemeral_size = 1
+ instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
+ instance = self._create_fake_instance_obj()
+ mappings = [
+ {'virtual': 'ami', 'device': 'sda1'},
+ {'virtual': 'root', 'device': '/dev/sda1'},
+
+ {'virtual': 'swap', 'device': 'sdb4'},
+ {'virtual': 'swap', 'device': 'sdb3'},
+ {'virtual': 'swap', 'device': 'sdb2'},
+ {'virtual': 'swap', 'device': 'sdb1'},
+
+ {'virtual': 'ephemeral0', 'device': 'sdc1'},
+ {'virtual': 'ephemeral1', 'device': 'sdc2'},
+ {'virtual': 'ephemeral2', 'device': 'sdc3'}]
+ block_device_mapping = [
+ # root
+ {'device_name': '/dev/sda1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
+ 'delete_on_termination': False},
+
+ # overwrite swap
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
+ {'device_name': '/dev/sdb4',
+ 'no_device': True},
+
+ # overwrite ephemeral
+ {'device_name': '/dev/sdc1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
+ {'device_name': '/dev/sdc4',
+ 'no_device': True},
+
+ # volume
+ {'device_name': '/dev/sdd1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdd2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
+ {'device_name': '/dev/sdd3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
+ {'device_name': '/dev/sdd4',
+ 'no_device': True}]
+
+ image_mapping = self.compute_api._prepare_image_mapping(
+ instance_type, mappings)
+ self.compute_api._update_block_device_mapping(
+ self.context, instance_type, instance['uuid'], image_mapping)
+
+ bdms = [block_device.BlockDeviceDict(bdm) for bdm in
+ db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid'])]
+ expected_result = [
+ {'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'swap', 'device_name': '/dev/sdb1',
+ 'volume_size': swap_size, 'delete_on_termination': True},
+ {'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'device_name': '/dev/sdc3', 'delete_on_termination': True},
+ {'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'device_name': '/dev/sdc1', 'delete_on_termination': True},
+ {'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'device_name': '/dev/sdc2', 'delete_on_termination': True},
+ ]
+ bdms.sort(key=operator.itemgetter('device_name'))
+ expected_result.sort(key=operator.itemgetter('device_name'))
+ self.assertEqual(len(bdms), len(expected_result))
+ for expected, got in zip(expected_result, bdms):
+ self.assertThat(expected, matchers.IsSubDictOf(got))
+
+ self.compute_api._update_block_device_mapping(
+ self.context, flavors.get_default_flavor(),
+ instance['uuid'], block_device_mapping)
+ bdms = [block_device.BlockDeviceDict(bdm) for bdm in
+ db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid'])]
+ expected_result = [
+ {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
+ 'device_name': '/dev/sda1'},
+
+ {'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'swap', 'device_name': '/dev/sdb1',
+ 'volume_size': swap_size, 'delete_on_termination': True},
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
+ {'no_device': True, 'device_name': '/dev/sdc4'},
+
+ {'device_name': '/dev/sdd1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdd2',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
+ {'device_name': '/dev/sdd3',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
+ {'no_device': True, 'device_name': '/dev/sdd4'}]
+ bdms.sort(key=operator.itemgetter('device_name'))
+ expected_result.sort(key=operator.itemgetter('device_name'))
+ self.assertEqual(len(bdms), len(expected_result))
+ for expected, got in zip(expected_result, bdms):
+ self.assertThat(expected, matchers.IsSubDictOf(got))
+
+ for bdm in db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid']):
+ db.block_device_mapping_destroy(self.context, bdm['id'])
+ instance.refresh()
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_check_and_transform_bdm(self, bdms, expected_bdms,
+ image_bdms=None, base_options=None,
+ legacy_bdms=False,
+ legacy_image_bdms=False):
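+ """Build image_meta from image_bdms (marking it bdm_v2 unless
+ legacy_image_bdms), run _check_and_transform_bdm over bdms and
+ assert the result matches expected_bdms.
+ """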
+ image_bdms = image_bdms or []
+ image_meta = {}
+ if image_bdms:
+ image_meta = {'properties': {'block_device_mapping': image_bdms}}
+ if not legacy_image_bdms:
+ image_meta['properties']['bdm_v2'] = True
+ base_options = base_options or {'root_device_name': 'vda',
+ 'image_ref': FAKE_IMAGE_REF}
+ transformed_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, image_meta, 1, 1, bdms, legacy_bdms)
+ self.assertThat(expected_bdms,
+ matchers.DictListMatches(transformed_bdm))
+
+ def test_check_and_transform_legacy_bdm_no_image_bdms(self):
+ legacy_bdms = [
+ {'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ expected_bdms = [block_device.BlockDeviceDict.from_legacy(
+ legacy_bdms[0])]
+ expected_bdms[0]['boot_index'] = 0
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ legacy_bdms=True)
+
+ def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
+ image_bdms = [
+ {'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ legacy_bdms = [
+ {'device_name': '/dev/vdb',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False}]
+ expected_bdms = [
+ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
+ block_device.BlockDeviceDict.from_legacy(image_bdms[0])]
+ expected_bdms[0]['boot_index'] = -1
+ expected_bdms[1]['boot_index'] = 0
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_bdms=True,
+ legacy_image_bdms=True)
+
+ def test_check_and_transform_legacy_bdm_image_bdms(self):
+ legacy_bdms = [
+ {'device_name': '/dev/vdb',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False}]
+ image_bdms = [block_device.BlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'boot_index': 0})]
+ expected_bdms = [
+ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
+ image_bdms[0]]
+ expected_bdms[0]['boot_index'] = -1
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_bdms=True)
+
+ def test_check_and_transform_bdm_no_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ expected_bdms = bdms
+ self._test_check_and_transform_bdm(bdms, expected_bdms)
+
+ def test_check_and_transform_bdm_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ image_bdms = [block_device.BlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})]
+ expected_bdms = bdms + image_bdms
+ self._test_check_and_transform_bdm(bdms, expected_bdms,
+ image_bdms=image_bdms)
+
+ def test_check_and_transform_bdm_legacy_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ image_bdms = [{'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ expected_bdms = [block_device.BlockDeviceDict.from_legacy(
+ image_bdms[0])]
+ expected_bdms[0]['boot_index'] = 0
+ self._test_check_and_transform_bdm(bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_image_bdms=True)
+
+ def test_check_and_transform_image(self):
+ base_options = {'root_device_name': 'vdb',
+ 'image_ref': FAKE_IMAGE_REF}
+ fake_legacy_bdms = [
+ {'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+
+ image_meta = {'properties': {'block_device_mapping': [
+ {'device_name': '/dev/vda',
+ 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333'}]}}
+
+ # An image BDM for the root device is added alongside the legacy BDM
+ transformed_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
+ self.assertEqual(len(transformed_bdm), 2)
+
+ # No image BDM created if image already defines a root BDM
+ base_options['root_device_name'] = 'vda'
+ transformed_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, image_meta, 1, 1, [], True)
+ self.assertEqual(len(transformed_bdm), 1)
+
+ # No image BDM is created when the legacy BDMs already cover the root
+ transformed_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
+ self.assertEqual(len(transformed_bdm), 1)
+
+ # Requesting more than one instance with volume-backed BDMs fails
+ self.assertRaises(exception.InvalidRequest,
+ self.compute_api._check_and_transform_bdm,
+ base_options, {}, {}, 1, 2, fake_legacy_bdms, True)
+
+ checked_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, {}, 1, 1, transformed_bdm, True)
+ self.assertEqual(checked_bdm, transformed_bdm)
+
+ def test_volume_size(self):
+ ephemeral_size = 2
+ swap_size = 3
+ volume_size = 5
+
+ swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
+ ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
+ volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
+
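+ # The assertions below exercise the expected precedence: a blank swap
+ # BDM defaults to the flavor's swap size, a blank non-swap BDM to its
+ # ephemeral_gb, and an explicit volume_size always takes priority.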
+ inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, ephemeral_bdm),
+ ephemeral_size)
+ ephemeral_bdm['volume_size'] = 42
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, ephemeral_bdm), 42)
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, swap_bdm),
+ swap_size)
+ swap_bdm['volume_size'] = 42
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, swap_bdm), 42)
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, volume_bdm),
+ volume_size)
+
+ def test_is_volume_backed_instance(self):
+ ctxt = self.context
+
+ instance = self._create_fake_instance({'image_ref': ''})
+ self.assertTrue(
+ self.compute_api.is_volume_backed_instance(ctxt, instance, None))
+
+ instance = self._create_fake_instance({'root_device_name': 'vda'})
+ self.assertFalse(
+ self.compute_api.is_volume_backed_instance(
+ ctxt, instance,
+ block_device_obj.block_device_make_list(ctxt, [])))
+
+ bdms = block_device_obj.block_device_make_list(ctxt,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'volume_id': 'fake_volume_id',
+ 'boot_index': 0,
+ 'destination_type': 'volume'})])
+ self.assertTrue(
+ self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
+
+ bdms = block_device_obj.block_device_make_list(ctxt,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'volume_id': 'fake_volume_id',
+ 'destination_type': 'local',
+ 'boot_index': 0,
+ 'snapshot_id': None}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume',
+ 'device_name': '/dev/vdb',
+ 'boot_index': 1,
+ 'destination_type': 'volume',
+ 'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
+ 'snapshot_id': None})])
+ self.assertFalse(
+ self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
+
+ bdms = block_device_obj.block_device_make_list(ctxt,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'volume_id': None})])
+ self.assertTrue(
+ self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
+
+ def test_is_volume_backed_instance_no_bdms(self):
+ ctxt = self.context
+ instance = self._create_fake_instance()
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ ctxt, instance['uuid']).AndReturn(
+ block_device_obj.block_device_make_list(ctxt, []))
+ self.mox.ReplayAll()
+
+ self.compute_api.is_volume_backed_instance(ctxt, instance, None)
+
+ def test_reservation_id_one_instance(self):
+ """Verify that the built instance has a reservation_id that
+ matches the value returned by create.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image')
+ try:
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0]['reservation_id'], resv_id)
+ finally:
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_reservation_ids_two_instances(self):
+ """Verify that building 2 instances at once returns a
+ reservation_id equal to the reservation_id set on both
+ instances.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2)
+ try:
+ self.assertEqual(len(refs), 2)
+ self.assertIsNotNone(resv_id)
+ finally:
+ for instance in refs:
+ self.assertEqual(instance['reservation_id'], resv_id)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_multi_instance_display_name_template(self):
+ self.flags(multi_instance_display_name_template='%(name)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x')
+ self.assertEqual(refs[0]['hostname'], 'x')
+ self.assertEqual(refs[1]['display_name'], 'x')
+ self.assertEqual(refs[1]['hostname'], 'x')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-1')
+ self.assertEqual(refs[0]['hostname'], 'x-1')
+ self.assertEqual(refs[1]['display_name'], 'x-2')
+ self.assertEqual(refs[1]['hostname'], 'x-2')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
+ self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
+
+ def test_instance_architecture(self):
+ # Test the instance architecture.
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['architecture'], arch.X86_64)
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ def test_instance_unknown_architecture(self):
+ # Test that an instance created with an empty architecture is not
+ # reported as 'Unknown'.
+ instance = self._create_fake_instance_obj(
+ params={'architecture': ''})
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, None,
+ None, None, True, None, False)
+ instance = db.instance_get_by_uuid(self.context,
+ instance['uuid'])
+ self.assertNotEqual(instance['architecture'], 'Unknown')
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_instance_name_template(self):
+ # Test the instance_name template.
+ self.flags(instance_name_template='instance-%d')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ self.flags(instance_name_template='instance-%(uuid)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ self.flags(instance_name_template='%(id)d-%(uuid)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], '%d-%s' %
+ (i_ref['id'], i_ref['uuid']))
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ # %(name)s is not allowed; the name falls back to the uuid
+ self.flags(instance_name_template='%(name)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], i_ref['uuid'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ def test_add_remove_fixed_ip(self):
+ instance = self._create_fake_instance_obj(params={'host': CONF.host})
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.add_fixed_ip(self.context, instance, '1')
+ self.compute_api.remove_fixed_ip(self.context,
+ instance, '192.168.1.1')
+ self.compute_api.delete(self.context, instance)
+
+ def test_attach_volume_invalid(self):
+ self.assertRaises(exception.InvalidDevicePath,
+ self.compute_api.attach_volume,
+ self.context,
+ {'locked': False, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'launched_at': timeutils.utcnow()},
+ None,
+ '/invalid')
+
+ def test_no_attach_volume_in_rescue_state(self):
+ def fake(*args, **kwargs):
+ pass
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake)
+ self.stubs.Set(cinder.API, 'reserve_volume', fake)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_volume,
+ self.context,
+ {'uuid': 'fake_uuid', 'locked': False,
+ 'vm_state': vm_states.RESCUED},
+ None,
+ '/dev/vdb')
+
+ def test_no_attach_volume_in_suspended_state(self):
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_volume,
+ self.context,
+ {'uuid': 'fake_uuid', 'locked': False,
+ 'vm_state': vm_states.SUSPENDED},
+ {'id': 'fake-volume-id'},
+ '/dev/vdb')
+
+ def test_no_detach_volume_in_rescue_state(self):
+ # Ensure a volume cannot be detached while the instance is in rescue
+ # state
+
+ params = {'vm_state': vm_states.RESCUED}
+ instance = self._create_fake_instance(params=params)
+
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': instance['uuid']}
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_volume,
+ self.context, instance, volume)
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ @mock.patch.object(cinder.API, 'get')
+ def test_no_rescue_in_volume_state_attaching(self,
+ mock_get_vol,
+ mock_get_bdms):
+ # Make sure a VM cannot be rescued while volume is being attached
+ instance = self._create_fake_instance_obj()
+ bdms, volume = self._fake_rescue_block_devices(instance)
+
+ mock_get_vol.return_value = {'id': volume['id'],
+ 'status': "attaching"}
+ mock_get_bdms.return_value = bdms
+
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.rescue, self.context, instance)
+
+ def test_vnc_console(self):
+ # Make sure we can get a VNC console for an instance.
+
+ fake_instance = {'uuid': 'fake_uuid',
+ 'host': 'fake_compute_host'}
+ fake_console_type = "novnc"
+ fake_connect_info = {'token': 'fake_token',
+ 'console_type': fake_console_type,
+ 'host': 'fake_console_host',
+ 'port': 'fake_console_port',
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid'],
+ 'access_url': 'fake_console_url'}
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
+ rpcapi.get_vnc_console(
+ self.context, instance=fake_instance,
+ console_type=fake_console_type).AndReturn(fake_connect_info)
+
+ self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
+ 'authorize_console')
+ self.compute_api.consoleauth_rpcapi.authorize_console(
+ self.context, 'fake_token', fake_console_type, 'fake_console_host',
+ 'fake_console_port', 'fake_access_path', 'fake_uuid')
+
+ self.mox.ReplayAll()
+
+ console = self.compute_api.get_vnc_console(self.context,
+ fake_instance, fake_console_type)
+ self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_vnc_console_no_host(self):
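+ # Ensure InstanceNotReady is raised when the instance has no host.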
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_vnc_console,
+ self.context, instance, 'novnc')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_spice_console(self):
+ # Make sure we can get a spice console for an instance.
+
+ fake_instance = {'uuid': 'fake_uuid',
+ 'host': 'fake_compute_host'}
+ fake_console_type = "spice-html5"
+ fake_connect_info = {'token': 'fake_token',
+ 'console_type': fake_console_type,
+ 'host': 'fake_console_host',
+ 'port': 'fake_console_port',
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid'],
+ 'access_url': 'fake_console_url'}
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
+ rpcapi.get_spice_console(
+ self.context, instance=fake_instance,
+ console_type=fake_console_type).AndReturn(fake_connect_info)
+
+ self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
+ 'authorize_console')
+ self.compute_api.consoleauth_rpcapi.authorize_console(
+ self.context, 'fake_token', fake_console_type, 'fake_console_host',
+ 'fake_console_port', 'fake_access_path', 'fake_uuid')
+
+ self.mox.ReplayAll()
+
+ console = self.compute_api.get_spice_console(self.context,
+ fake_instance, fake_console_type)
+ self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_spice_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_spice_console,
+ self.context, instance, 'spice')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rdp_console(self):
+ # Make sure we can get an rdp console for an instance.
+
+ fake_instance = {'uuid': 'fake_uuid',
+ 'host': 'fake_compute_host'}
+ fake_console_type = "rdp-html5"
+ fake_connect_info = {'token': 'fake_token',
+ 'console_type': fake_console_type,
+ 'host': 'fake_console_host',
+ 'port': 'fake_console_port',
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid'],
+ 'access_url': 'fake_console_url'}
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
+ rpcapi.get_rdp_console(
+ self.context, instance=fake_instance,
+ console_type=fake_console_type).AndReturn(fake_connect_info)
+
+ self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
+ 'authorize_console')
+ self.compute_api.consoleauth_rpcapi.authorize_console(
+ self.context, 'fake_token', fake_console_type, 'fake_console_host',
+ 'fake_console_port', 'fake_access_path', 'fake_uuid')
+
+ self.mox.ReplayAll()
+
+ console = self.compute_api.get_rdp_console(self.context,
+ fake_instance, fake_console_type)
+ self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_rdp_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_rdp_console,
+ self.context, instance, 'rdp')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_serial_console(self):
+ # Make sure we can get a serial proxy url for an instance.
+
+ fake_instance = {'uuid': 'fake_uuid',
+ 'host': 'fake_compute_host'}
+ fake_console_type = 'serial'
+ fake_connect_info = {'token': 'fake_token',
+ 'console_type': fake_console_type,
+ 'host': 'fake_serial_host',
+ 'port': 'fake_tcp_port',
+ 'internal_access_path': 'fake_access_path',
+ 'instance_uuid': fake_instance['uuid'],
+ 'access_url': 'fake_access_url'}
+
+ rpcapi = compute_rpcapi.ComputeAPI
+
+ with contextlib.nested(
+ mock.patch.object(rpcapi, 'get_serial_console',
+ return_value=fake_connect_info),
+ mock.patch.object(self.compute_api.consoleauth_rpcapi,
+ 'authorize_console')
+ ) as (mock_get_serial_console, mock_authorize_console):
+ self.compute_api.consoleauth_rpcapi.authorize_console(
+ self.context, 'fake_token', fake_console_type,
+ 'fake_serial_host', 'fake_tcp_port',
+ 'fake_access_path', 'fake_uuid')
+
+ console = self.compute_api.get_serial_console(self.context,
+ fake_instance,
+ fake_console_type)
+ self.assertEqual(console, {'url': 'fake_access_url'})
+
+ def test_get_serial_console_no_host(self):
+ # Make sure an exception is raised when the instance has no host set.
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_serial_console,
+ self.context, instance, 'serial')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_console_output(self):
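+ # Ensure console output is fetched from the compute host over RPC.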
+ fake_instance = {'uuid': 'fake_uuid',
+ 'host': 'fake_compute_host'}
+ fake_tail_length = 699
+ fake_console_output = 'fake console output'
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_console_output')
+ rpcapi.get_console_output(
+ self.context, instance=fake_instance,
+ tail_length=fake_tail_length).AndReturn(fake_console_output)
+
+ self.mox.ReplayAll()
+
+ output = self.compute_api.get_console_output(self.context,
+ fake_instance, tail_length=fake_tail_length)
+ self.assertEqual(output, fake_console_output)
+
+ def test_console_output_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_console_output,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_attach_interface(self):
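+ # Ensure a port can be attached and the resulting VIF is returned.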
+ new_type = flavors.get_flavor_by_flavor_id('4')
+ sys_meta = flavors.save_flavor_info({}, new_type)
+
+ instance = objects.Instance(image_ref='foo',
+ system_metadata=sys_meta)
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'allocate_port_for_instance')
+ nwinfo = [fake_network_cache_model.new_vif()]
+ network_id = nwinfo[0]['network']['id']
+ port_id = nwinfo[0]['id']
+ req_ip = '1.2.3.4'
+ self.compute.network_api.allocate_port_for_instance(
+ self.context, instance, port_id, network_id, req_ip
+ ).AndReturn(nwinfo)
+ self.mox.ReplayAll()
+ vif = self.compute.attach_interface(self.context,
+ instance,
+ network_id,
+ port_id,
+ req_ip)
+ self.assertEqual(vif['id'], network_id)
+ return nwinfo, port_id
+
+ def test_detach_interface(self):
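+ # Ensure a previously attached interface can be detached again.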
+ nwinfo, port_id = self.test_attach_interface()
+ self.stubs.Set(self.compute.network_api,
+ 'deallocate_port_for_instance',
+ lambda a, b, c: [])
+ instance = objects.Instance()
+ instance.info_cache = objects.InstanceInfoCache.new(
+ self.context, 'fake-uuid')
+ instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
+ nwinfo)
+ self.compute.detach_interface(self.context, instance, port_id)
+ self.assertEqual(self.compute.driver._interfaces, {})
+
+ def test_attach_volume(self):
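+ # Ensure attach_volume reserves the volume and the block device
+ # mapping, then casts the attach to the compute host.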
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
+ bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
+ self.context,
+ block_device_obj.BlockDeviceMapping(),
+ fake_bdm)
+ instance = self._create_fake_instance()
+ fake_volume = {'id': 'fake-volume-id'}
+
+ with contextlib.nested(
+ mock.patch.object(cinder.API, 'get', return_value=fake_volume),
+ mock.patch.object(cinder.API, 'check_attach'),
+ mock.patch.object(cinder.API, 'reserve_volume'),
+ mock.patch.object(compute_rpcapi.ComputeAPI,
+ 'reserve_block_device_name', return_value=bdm),
+ mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
+ ) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
+ mock_attach):
+
+ self.compute_api.attach_volume(
+ self.context, instance, 'fake-volume-id',
+ '/dev/vdb', 'ide', 'cdrom')
+
+ mock_reserve_bdm.assert_called_once_with(
+ self.context, instance, '/dev/vdb', 'fake-volume-id',
+ disk_bus='ide', device_type='cdrom')
+ self.assertEqual(mock_get.call_args,
+ mock.call(self.context, 'fake-volume-id'))
+ self.assertEqual(mock_check_attach.call_args,
+ mock.call(
+ self.context, fake_volume, instance=instance))
+ mock_reserve_vol.assert_called_once_with(
+ self.context, 'fake-volume-id')
+ a, kw = mock_attach.call_args
+ self.assertEqual(kw['volume_id'], 'fake-volume-id')
+ self.assertEqual(kw['mountpoint'], '/dev/vdb')
+ self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
+ self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
+
+ def test_attach_volume_no_device(self):
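+ # Ensure the attach flow still works when no device name is given.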
+
+ called = {}
+
+ def fake_check_attach(*args, **kwargs):
+ called['fake_check_attach'] = True
+
+ def fake_reserve_volume(*args, **kwargs):
+ called['fake_reserve_volume'] = True
+
+ def fake_volume_get(self, context, volume_id):
+ called['fake_volume_get'] = True
+ return {'id': volume_id}
+
+ def fake_rpc_attach_volume(self, context, **kwargs):
+ called['fake_rpc_attach_volume'] = True
+
+ def fake_rpc_reserve_block_device_name(self, context, instance, device,
+ volume_id, **kwargs):
+ called['fake_rpc_reserve_block_device_name'] = True
+ bdm = block_device_obj.BlockDeviceMapping()
+ bdm['device_name'] = '/dev/vdb'
+ return bdm
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
+ fake_reserve_volume)
+ self.stubs.Set(compute_rpcapi.ComputeAPI,
+ 'reserve_block_device_name',
+ fake_rpc_reserve_block_device_name)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
+ fake_rpc_attach_volume)
+
+ instance = self._create_fake_instance()
+ self.compute_api.attach_volume(self.context, instance, 1, device=None)
+ self.assertTrue(called.get('fake_check_attach'))
+ self.assertTrue(called.get('fake_reserve_volume'))
+ self.assertTrue(called.get('fake_volume_get'))
+ self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
+ self.assertTrue(called.get('fake_rpc_attach_volume'))
+
+ def test_detach_volume(self):
+ # Ensure volume can be detached from instance
+ called = {}
+ instance = self._create_fake_instance()
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': instance['uuid']}
+
+ def fake_check_detach(*args, **kwargs):
+ called['fake_check_detach'] = True
+
+ def fake_begin_detaching(*args, **kwargs):
+ called['fake_begin_detaching'] = True
+
+ def fake_rpc_detach_volume(self, context, **kwargs):
+ called['fake_rpc_detach_volume'] = True
+
+ self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
+ self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
+ fake_rpc_detach_volume)
+
+ self.compute_api.detach_volume(self.context,
+ instance, volume)
+ self.assertTrue(called.get('fake_check_detach'))
+ self.assertTrue(called.get('fake_begin_detaching'))
+ self.assertTrue(called.get('fake_rpc_detach_volume'))
+
+ def test_detach_invalid_volume(self):
+ # Ensure an exception is raised when detaching an unattached volume.
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'detached'}
+
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_unattached_volume(self):
+ # Ensure exception is raised when volume's idea of attached
+ # instance doesn't match.
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': 'uuid2'}
+
+ self.assertRaises(exception.VolumeUnattached,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_suspended_instance_fails(self):
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.SUSPENDED,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': 'uuid2'}
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_volume_libvirt_is_down(self):
+ # Ensure rollback during detach if libvirt goes down
+
+ called = {}
+ instance = self._create_fake_instance()
+
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'device_name': '/dev/vdb', 'volume_id': 1,
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'connection_info': '{"test": "test"}'})
+
+ def fake_libvirt_driver_instance_exists(_instance):
+ called['fake_libvirt_driver_instance_exists'] = True
+ return False
+
+ def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
+ called['fake_libvirt_driver_detach_volume_fails'] = True
+ raise AttributeError()
+
+ def fake_roll_detaching(*args, **kwargs):
+ called['fake_roll_detaching'] = True
+
+ self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
+ self.stubs.Set(self.compute.driver, "instance_exists",
+ fake_libvirt_driver_instance_exists)
+ self.stubs.Set(self.compute.driver, "detach_volume",
+ fake_libvirt_driver_detach_volume_fails)
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+ 'get_by_volume_id')
+ objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 1).AndReturn(objects.BlockDeviceMapping(
+ **fake_bdm))
+ self.mox.ReplayAll()
+
+ self.assertRaises(AttributeError, self.compute.detach_volume,
+ self.context, 1, instance)
+ self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
+ self.assertTrue(called.get('fake_roll_detaching'))
+
+ def test_detach_volume_not_found(self):
+ # Ensure that a volume can be detached even when it has already been
+ # removed from the instance but still remains in the BDM. See bug #1367964.
+
+ instance = self._create_fake_instance()
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'fake-id', 'device_name': '/dev/vdb',
+ 'connection_info': '{"test": "test"}'})
+ bdm = objects.BlockDeviceMapping(**fake_bdm)
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'detach_volume',
+ side_effect=exception.DiskNotFound('sdb')),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id', return_value=bdm),
+ mock.patch.object(cinder.API, 'terminate_connection'),
+ mock.patch.object(bdm, 'destroy'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute.volume_api, 'detach'),
+ mock.patch.object(self.compute.driver, 'get_volume_connector',
+ return_value='fake-connector')
+ ) as (mock_detach_volume, mock_volume, mock_terminate_connection,
+ mock_destroy, mock_notify, mock_detach, mock_volume_connector):
+ self.compute.detach_volume(self.context, 'fake-id', instance)
+ self.assertTrue(mock_detach_volume.called)
+ mock_terminate_connection.assert_called_once_with(self.context,
+ 'fake-id',
+ 'fake-connector')
+ mock_destroy.assert_called_once_with()
+ mock_detach.assert_called_once_with(mock.ANY, 'fake-id')
+
+ def test_terminate_with_volumes(self):
+ # Make sure that volumes get detached during instance termination.
+ admin = context.get_admin_context()
+ instance = self._create_fake_instance_obj()
+
+ volume_id = 'fake'
+ values = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+ db.block_device_mapping_create(admin, values)
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume_id_param):
+ result["detached"] = volume_id_param == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume_id, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # Kill the instance and check that it was detached
+ bdms = db.block_device_mapping_get_all_by_instance(admin,
+ instance['uuid'])
+ self.compute.terminate_instance(admin, instance, bdms, [])
+
+ self.assertTrue(result["detached"])
+
+ def test_terminate_deletes_all_bdms(self):
+ admin = context.get_admin_context()
+ instance = self._create_fake_instance_obj()
+
+ img_bdm = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'delete_on_termination': False,
+ 'boot_index': 0,
+ 'image_id': 'fake_image'}
+ vol_bdm = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'volume_id': 'fake_vol'}
+ bdms = []
+ for bdm in img_bdm, vol_bdm:
+ bdm_obj = objects.BlockDeviceMapping(**bdm)
+ bdm_obj.create(admin)
+ bdms.append(bdm_obj)
+
+ self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
+ self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ self.compute.terminate_instance(self.context, instance, bdms, [])
+
+ bdms = db.block_device_mapping_get_all_by_instance(admin,
+ instance['uuid'])
+ self.assertEqual(len(bdms), 0)
+
+ def test_inject_network_info(self):
+ instance = self._create_fake_instance_obj(params={'host': CONF.host})
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'],
+ want_objects=True)
+ self.compute_api.inject_network_info(self.context, instance)
+ self.stubs.Set(self.compute_api.network_api,
+ 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_reset_network(self):
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'],
+ want_objects=True)
+ self.compute_api.reset_network(self.context, instance)
+
+ def test_lock(self):
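+ # Ensure an instance can be locked.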
+ instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.lock(self.context, instance)
+
+ def test_unlock(self):
+ instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.unlock(self.context, instance)
+
+ def test_get_lock(self):
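+ # Ensure the lock state of an instance is reported correctly.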
+ instance = self._create_fake_instance()
+ self.assertFalse(self.compute_api.get_lock(self.context, instance))
+ db.instance_update(self.context, instance['uuid'], {'locked': True})
+ self.assertTrue(self.compute_api.get_lock(self.context, instance))
+
+ def test_add_remove_security_group(self):
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'])
+ security_group_name = self._create_group()['name']
+
+ self.security_group_api.add_to_instance(self.context,
+ instance,
+ security_group_name)
+ self.security_group_api.remove_from_instance(self.context,
+ instance,
+ security_group_name)
+
+ def test_get_diagnostics(self):
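+ # Ensure diagnostics are requested from the compute host over RPC.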
+ instance = self._create_fake_instance_obj()
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
+ rpcapi.get_diagnostics(self.context, instance=instance)
+ self.mox.ReplayAll()
+
+ self.compute_api.get_diagnostics(self.context, instance)
+
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_get_instance_diagnostics(self):
+ instance = self._create_fake_instance_obj()
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
+ rpcapi.get_instance_diagnostics(self.context, instance=instance)
+ self.mox.ReplayAll()
+
+ self.compute_api.get_instance_diagnostics(self.context, instance)
+
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_secgroup_refresh(self):
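+ # Ensure a members refresh triggers a security rules refresh on the
+ # instance's host.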
+ instance = self._create_fake_instance()
+
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1])
+
+ def test_secgroup_refresh_once(self):
+ instance = self._create_fake_instance()
+
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1, 2])
+
+ def test_secgroup_refresh_none(self):
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': []})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1])
+
+ def test_secrule_refresh(self):
+ instance = self._create_fake_instance()
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1])
+
+ def test_secrule_refresh_once(self):
+ instance = self._create_fake_instance()
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
+
+ def test_secrule_refresh_none(self):
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': []})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
+
+ def test_live_migrate(self):
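+ # Ensure live_migrate records the action, delegates to the task API
+ # and leaves the instance in the MIGRATING task state.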
+ instance, instance_uuid = self._run_instance()
+
+ rpcapi = self.compute_api.compute_task_api
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
+ self.compute_api._record_action_start(self.context, instance,
+ 'live-migration')
+ rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
+ block_migration=True,
+ disk_over_commit=True)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.live_migrate(self.context, instance,
+ block_migration=True,
+ disk_over_commit=True,
+ host_name='fake_dest_host')
+
+ instance.refresh()
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
+ def test_evacuate(self):
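+ # Ensure an instance on a down host can be evacuated to a new host.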
+ instance = self._create_fake_instance_obj(services=True)
+ self.assertIsNone(instance.task_state)
+
+ def fake_service_is_up(*args, **kwargs):
+ return False
+
+ def fake_rebuild_instance(*args, **kwargs):
+ instance.host = kwargs['host']
+ instance.save()
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
+ fake_rebuild_instance)
+ self.compute_api.evacuate(self.context.elevated(),
+ instance,
+ host='fake_dest_host',
+ on_shared_storage=True,
+ admin_password=None)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.REBUILDING)
+ self.assertEqual(instance.host, 'fake_dest_host')
+ instance.destroy()
+
+ def test_fail_evacuate_from_non_existing_host(self):
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['launched_at'] = timeutils.utcnow()
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = arch.X86_64
+ inst['os_type'] = 'Linux'
+ instance = self._create_fake_instance_obj(inst)
+
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.ComputeHostNotFound,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_fail_evacuate_from_running_host(self):
+ instance = self._create_fake_instance_obj(services=True)
+ self.assertIsNone(instance.task_state)
+
+ def fake_service_is_up(*args, **kwargs):
+ return True
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+
+ self.assertRaises(exception.ComputeServiceInUse,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_fail_evacuate_instance_in_wrong_state(self):
+ states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED,
+ vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.DELETED]
+ instances = [self._create_fake_instance_obj({'vm_state': state})
+ for state in states]
+
+ for instance in instances:
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.evacuate, self.context, instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_get_migrations(self):
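+ # Ensure migrations can be listed by filters.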
+ migration = test_migration.fake_db_migration(uuid="1234")
+ filters = {'host': 'host1'}
+ self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
+ db.migration_get_all_by_filters(self.context,
+ filters).AndReturn([migration])
+ self.mox.ReplayAll()
+
+ migrations = self.compute_api.get_migrations(self.context,
+ filters)
+ self.assertEqual(1, len(migrations))
+ self.assertEqual(migrations[0].id, migration['id'])
+
+
+def fake_rpc_method(context, method, **kwargs):
+ pass
+
+
+def _create_service_entries(context, values=[['avail_zone1', ['fake_host1',
+ 'fake_host2']],
+ ['avail_zone2', ['fake_host3']]]):
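+ # Create nova-compute service records for each (zone, hosts) pair so
+ # the aggregate tests have known hosts to work with.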
+ for (avail_zone, hosts) in values:
+ for host in hosts:
+ db.service_create(context,
+ {'host': host,
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0})
+ return values
+
+
+class ComputeAPIAggrTestCase(BaseTestCase):
+ """This is for unit coverage of aggregate-related methods
+ defined in nova.compute.api.
+ """
+
+ def setUp(self):
+ super(ComputeAPIAggrTestCase, self).setUp()
+ self.api = compute_api.AggregateAPI()
+ self.context = context.get_admin_context()
+ self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
+ self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
+
+ def test_aggregate_no_zone(self):
+ # Ensure we can create an aggregate without an availability zone
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ None)
+ self.api.delete_aggregate(self.context, aggr['id'])
+ db.aggregate_get(self.context.elevated(read_deleted='yes'),
+ aggr['id'])
+ self.assertRaises(exception.AggregateNotFound,
+ self.api.delete_aggregate, self.context, aggr['id'])
+
+ def test_check_az_for_aggregate(self):
+ # Ensure all conflict hosts can be returned
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host1 = values[0][1][0]
+ fake_host2 = values[0][1][1]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host1)
+ aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host2)
+ aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+
+ def test_update_aggregate(self):
+ # Ensure aggregate properties (such as the name) can be updated.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ fake_notifier.NOTIFICATIONS = []
+ aggr = self.api.update_aggregate(self.context, aggr['id'],
+ {'name': 'new_fake_aggregate'})
+ self.assertIsNone(availability_zones._get_cache().get('cache'))
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.end')
+
+ def test_update_aggregate_no_az(self):
+ # Ensure metadata that does not include an availability zone can be
+ # updated, even if the aggregate contains hosts belonging
+ # to another availability zone.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'name': 'new_fake_aggregate'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr2 = self.api.update_aggregate(self.context, aggr2['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.end')
+
+ def test_update_aggregate_az_change(self):
+ # Ensure the availability zone can be updated
+ # when the aggregate is the only one with
+ # an availability zone.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_fails(self):
+ # Ensure the aggregate's availability zone can't be updated
+ # when the aggregate has hosts in another availability zone.
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+ fake_host2 = values[0][1][1]
+ aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
+ None, fake_host2)
+ metadata = {'availability_zone': fake_zone}
+ aggr3 = self.api.update_aggregate(self.context, aggr3['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
+ msg = fake_notifier.NOTIFICATIONS[13]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[14]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_fails_with_nova_az(self):
+ # Ensure the aggregate's availability zone can't be updated
+ # when the aggregate has hosts in the default (nova) availability zone.
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ CONF.default_availability_zone,
+ fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+
+ def test_update_aggregate_metadata(self):
+ # Ensure metadata can be updated.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ metadata = {'foo_key1': 'foo_value1',
+ 'foo_key2': 'foo_value2',
+ 'availability_zone': 'fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ availability_zones._get_cache().add('fake_key', 'fake_value')
+ aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
+ metadata)
+ self.assertIsNone(availability_zones._get_cache().get('fake_key'))
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+ fake_notifier.NOTIFICATIONS = []
+ metadata['foo_key1'] = None
+ expected_payload_meta_data = {'foo_key1': None,
+ 'foo_key2': 'foo_value2',
+ 'availability_zone': 'fake_zone'}
+ expected = self.api.update_aggregate_metadata(self.context,
+ aggr['id'], metadata)
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
+ self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
+ self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
+ self.assertThat(expected['metadata'],
+ matchers.DictMatches({'availability_zone': 'fake_zone',
+ 'foo_key2': 'foo_value2'}))
+
+ def test_update_aggregate_metadata_no_az(self):
+ # Ensure metadata that does not include an availability zone can be
+ # updated, even if the aggregate contains hosts belonging
+ # to another availability zone.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'foo_key2': 'foo_value3'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr2 = self.api.update_aggregate_metadata(self.context, aggr2['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+ self.assertThat(aggr2['metadata'],
+ matchers.DictMatches({'foo_key2': 'foo_value3'}))
+
+ def test_update_aggregate_metadata_az_change(self):
+ # Ensure the availability zone can be updated
+ # when the aggregate is the only one with
+ # an availability zone.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr1 = self.api.update_aggregate_metadata(self.context,
+ aggr1['id'], metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_do_not_replace_existing_metadata(self):
+ # Ensure that updating the aggregate availability zone
+ # does not replace the aggregate's existing metadata.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ metadata = {'foo_key1': 'foo_value1'}
+ aggr = self.api.update_aggregate_metadata(self.context,
+ aggr['id'],
+ metadata)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ aggr = self.api.update_aggregate(self.context,
+ aggr['id'],
+ metadata)
+ self.assertThat(aggr['metadata'], matchers.DictMatches(
+ {'availability_zone': 'new_fake_zone', 'foo_key1': 'foo_value1'}))
+
+ def test_update_aggregate_metadata_az_fails(self):
+ # Ensure the aggregate's availability zone can't be updated
+ # when the aggregate has hosts in another availability zone.
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate_metadata,
+ self.context, aggr2['id'], metadata)
+ aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
+ None, fake_host)
+ metadata = {'availability_zone': fake_zone}
+ aggr3 = self.api.update_aggregate_metadata(self.context,
+ aggr3['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
+ msg = fake_notifier.NOTIFICATIONS[13]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[14]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_delete_aggregate(self):
+ # Ensure we can delete an aggregate.
+ fake_notifier.NOTIFICATIONS = []
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.create.end')
+ fake_notifier.NOTIFICATIONS = []
+ self.api.delete_aggregate(self.context, aggr['id'])
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.delete.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.delete.end')
+ db.aggregate_get(self.context.elevated(read_deleted='yes'),
+ aggr['id'])
+ self.assertRaises(exception.AggregateNotFound,
+ self.api.delete_aggregate, self.context, aggr['id'])
+
+ def test_delete_non_empty_aggregate(self):
+ # Ensure InvalidAggregateAction is raised for a non-empty aggregate.
+ _create_service_entries(self.context,
+ [['fake_availability_zone', ['fake_host']]])
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_availability_zone')
+ self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.delete_aggregate, self.context, aggr['id'])
+
+ def test_add_host_to_aggregate(self):
+ # Ensure we can add a host to an aggregate.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+
+ def fake_add_aggregate_host(*args, **kwargs):
+ hosts = kwargs["aggregate"]["hosts"]
+ self.assertIn(fake_host, hosts)
+
+ self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
+ fake_add_aggregate_host)
+
+ self.mox.StubOutWithMock(availability_zones,
+ 'update_host_availability_zone_cache')
+
+ def _stub_update_host_avail_zone_cache(host, az=None):
+ if az is not None:
+ availability_zones.update_host_availability_zone_cache(
+ self.context, host, az)
+ else:
+ availability_zones.update_host_availability_zone_cache(
+ self.context, host)
+
+ for (avail_zone, hosts) in values:
+ for host in hosts:
+ _stub_update_host_avail_zone_cache(
+ host, CONF.default_availability_zone)
+ _stub_update_host_avail_zone_cache(fake_host)
+ self.mox.ReplayAll()
+
+ fake_notifier.NOTIFICATIONS = []
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], fake_host)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.addhost.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.addhost.end')
+ self.assertEqual(len(aggr['hosts']), 1)
+
+ def test_add_host_to_aggr_with_no_az(self):
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
+ fake_host)
+ aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
+ None)
+ aggr_no_az = self.api.add_host_to_aggregate(self.context,
+ aggr_no_az['id'],
+ fake_host)
+ self.assertIn(fake_host, aggr['hosts'])
+ self.assertIn(fake_host, aggr_no_az['hosts'])
+
+ def test_add_host_no_az_metadata(self):
+ # NOTE(mtreinish) based on how create works this is not how
+ # the metadata is supposed to end up in the database but it has
+ # been seen. See lp bug #1209007. This test just confirms that
+ # the host is still added to the aggregate if there is no
+ # availability zone metadata.
+ def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
+ return {'meta_key': 'fake_value'}
+ self.stubs.Set(self.compute.db,
+ 'aggregate_metadata_get_by_metadata_key',
+ fake_aggregate_metadata_get_by_metadata_key)
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ fake_zone)
+ aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
+ fake_host)
+ self.assertIn(fake_host, aggr['hosts'])
+
+ def test_add_host_to_multi_az(self):
+ # Ensure we can't add a host to a different availability zone.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], fake_host)
+ self.assertEqual(len(aggr['hosts']), 1)
+ fake_zone2 = "another_zone"
+ aggr2 = self.api.create_aggregate(self.context,
+ 'fake_aggregate2', fake_zone2)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.add_host_to_aggregate,
+ self.context, aggr2['id'], fake_host)
+
+ def test_add_host_to_multi_az_with_nova_agg(self):
+ # Ensure we can't add a host if it already belongs to an aggregate
+ # with the AZ set to the default.
+ values = _create_service_entries(self.context)
+ fake_host = values[0][1][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate',
+ CONF.default_availability_zone)
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], fake_host)
+ self.assertEqual(len(aggr['hosts']), 1)
+ fake_zone2 = "another_zone"
+ aggr2 = self.api.create_aggregate(self.context,
+ 'fake_aggregate2', fake_zone2)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.add_host_to_aggregate,
+ self.context, aggr2['id'], fake_host)
+
+ def test_add_host_to_aggregate_multiple(self):
+ # Ensure we can add multiple hosts to an aggregate.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ for host in values[0][1]:
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], host)
+ self.assertEqual(len(aggr['hosts']), len(values[0][1]))
+
+ def test_add_host_to_aggregate_raise_not_found(self):
+ # Ensure ComputeHostNotFound is raised when adding an invalid host.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ fake_notifier.NOTIFICATIONS = []
+ self.assertRaises(exception.ComputeHostNotFound,
+ self.api.add_host_to_aggregate,
+ self.context, aggr['id'], 'invalid_host')
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
+ 'compute.fake-mini')
+
+ def test_remove_host_from_aggregate_active(self):
+ # Ensure we can remove a host from an aggregate.
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ for host in values[0][1]:
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], host)
+ host_to_remove = values[0][1][0]
+
+ def fake_remove_aggregate_host(*args, **kwargs):
+ hosts = kwargs["aggregate"]["hosts"]
+ self.assertNotIn(host_to_remove, hosts)
+
+ self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
+ fake_remove_aggregate_host)
+
+ self.mox.StubOutWithMock(availability_zones,
+ 'update_host_availability_zone_cache')
+ availability_zones.update_host_availability_zone_cache(self.context,
+ host_to_remove)
+ self.mox.ReplayAll()
+
+ fake_notifier.NOTIFICATIONS = []
+ expected = self.api.remove_host_from_aggregate(self.context,
+ aggr['id'],
+ host_to_remove)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.removehost.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.removehost.end')
+ self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
+
+ def test_remove_host_from_aggregate_raise_not_found(self):
+ # Ensure ComputeHostNotFound is raised when removing an invalid host.
+ _create_service_entries(self.context, [['fake_zone', ['fake_host']]])
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ self.assertRaises(exception.ComputeHostNotFound,
+ self.api.remove_host_from_aggregate,
+ self.context, aggr['id'], 'invalid_host')
+
+ def test_aggregate_list(self):
+ aggregate = self.api.create_aggregate(self.context,
+ 'fake_aggregate',
+ 'fake_zone')
+ metadata = {'foo_key1': 'foo_value1',
+ 'foo_key2': 'foo_value2'}
+ meta_aggregate = self.api.create_aggregate(self.context,
+ 'fake_aggregate2',
+ 'fake_zone2')
+ self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
+ metadata)
+ aggregate_list = self.api.get_aggregate_list(self.context)
+ self.assertIn(aggregate['id'],
+ map(lambda x: x['id'], aggregate_list))
+ self.assertIn(meta_aggregate['id'],
+ map(lambda x: x['id'], aggregate_list))
+ self.assertIn('fake_aggregate',
+ map(lambda x: x['name'], aggregate_list))
+ self.assertIn('fake_aggregate2',
+ map(lambda x: x['name'], aggregate_list))
+ self.assertIn('fake_zone',
+ map(lambda x: x['availability_zone'], aggregate_list))
+ self.assertIn('fake_zone2',
+ map(lambda x: x['availability_zone'], aggregate_list))
+ test_meta_aggregate = aggregate_list[1]
+ self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
+ self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
+ self.assertEqual('foo_value1',
+ test_meta_aggregate.get('metadata')['foo_key1'])
+ self.assertEqual('foo_value2',
+ test_meta_aggregate.get('metadata')['foo_key2'])
+
+ def test_aggregate_list_with_hosts(self):
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ host_aggregate = self.api.create_aggregate(self.context,
+ 'fake_aggregate',
+ fake_zone)
+ self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
+ values[0][1][0])
+ aggregate_list = self.api.get_aggregate_list(self.context)
+ aggregate = aggregate_list[0]
+ self.assertIn(values[0][1][0], aggregate.get('hosts'))
+
+
+class ComputeAggrTestCase(BaseTestCase):
+ """This is for unit coverage of aggregate-related methods
+ defined in nova.compute.manager.
+ """
+
+ def setUp(self):
+ super(ComputeAggrTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ values = {'name': 'test_aggr'}
+ az = {'availability_zone': 'test_zone'}
+ self.aggr = db.aggregate_create(self.context, values, metadata=az)
+
+ def test_add_aggregate_host(self):
+ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+ fake_driver_add_to_aggregate.called = True
+ return {"foo": "bar"}
+ self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ fake_driver_add_to_aggregate)
+
+ self.compute.add_aggregate_host(self.context, host="host",
+ aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
+ self.assertTrue(fake_driver_add_to_aggregate.called)
+
+ def test_remove_aggregate_host(self):
+ def fake_driver_remove_from_aggregate(context, aggregate, host,
+ **_ignore):
+ fake_driver_remove_from_aggregate.called = True
+ self.assertEqual("host", host, "host")
+ return {"foo": "bar"}
+ self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+ fake_driver_remove_from_aggregate)
+
+ self.compute.remove_aggregate_host(self.context,
+ aggregate=jsonutils.to_primitive(self.aggr), host="host",
+ slave_info=None)
+ self.assertTrue(fake_driver_remove_from_aggregate.called)
+
+ def test_add_aggregate_host_passes_slave_info_to_driver(self):
+ def driver_add_to_aggregate(context, aggregate, host, **kwargs):
+ self.assertEqual(self.context, context)
+ self.assertEqual(aggregate['id'], self.aggr['id'])
+ self.assertEqual(host, "the_host")
+ self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+
+ self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ driver_add_to_aggregate)
+
+ self.compute.add_aggregate_host(self.context, host="the_host",
+ slave_info="SLAVE_INFO",
+ aggregate=jsonutils.to_primitive(self.aggr))
+
+ def test_remove_from_aggregate_passes_slave_info_to_driver(self):
+ def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
+ self.assertEqual(self.context, context)
+ self.assertEqual(aggregate['id'], self.aggr['id'])
+ self.assertEqual(host, "the_host")
+ self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+
+ self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+ driver_remove_from_aggregate)
+
+ self.compute.remove_aggregate_host(self.context,
+ aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
+ slave_info="SLAVE_INFO")
+
+
+class ComputePolicyTestCase(BaseTestCase):
+
+ def setUp(self):
+ super(ComputePolicyTestCase, self).setUp()
+
+ self.compute_api = compute.API()
+
+ def test_actions_are_prefixed(self):
+ self.mox.StubOutWithMock(policy, 'enforce')
+ nova.policy.enforce(self.context, 'compute:reboot', {})
+ self.mox.ReplayAll()
+ compute_api.check_policy(self.context, 'reboot', {})
+
+ def test_wrapped_method(self):
+ instance = self._create_fake_instance_obj(params={'host': None,
+ 'cell_name': 'foo'})
+
+ # force delete to fail
+ rules = {"compute:delete": [["false:false"]]}
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.delete, self.context, instance)
+
+ # reset rules to allow deletion
+ rules = {"compute:delete": []}
+ self.policy.set_rules(rules)
+
+ self.compute_api.delete(self.context, instance)
+
+ def test_create_fail(self):
+ rules = {"compute:create": [["false:false"]]}
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, '1', '1')
+
+ def test_create_attach_volume_fail(self):
+ rules = {
+ "compute:create": [],
+ "compute:create:attach_network": [["false:false"]],
+ "compute:create:attach_volume": [],
+ }
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, '1', '1',
+ requested_networks='blah',
+ block_device_mapping='blah')
+
+ def test_create_attach_network_fail(self):
+ rules = {
+ "compute:create": [],
+ "compute:create:attach_network": [],
+ "compute:create:attach_volume": [["false:false"]],
+ }
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, '1', '1',
+ requested_networks='blah',
+ block_device_mapping='blah')
+
+ def test_get_fail(self):
+ instance = self._create_fake_instance()
+
+ rules = {
+ "compute:get": [["false:false"]],
+ }
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.get, self.context, instance['uuid'])
+
+ def test_get_all_fail(self):
+ rules = {
+ "compute:get_all": [["false:false"]],
+ }
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.get_all, self.context)
+
+ def test_get_instance_faults(self):
+ instance1 = self._create_fake_instance()
+ instance2 = self._create_fake_instance()
+ instances = [instance1, instance2]
+
+ rules = {
+ "compute:get_instance_faults": [["false:false"]],
+ }
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.get_instance_faults,
+ context.get_admin_context(), instances)
+
+ def test_force_host_fail(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": [["role:fake"]],
+ "network:validate_networks": []}
+ self.policy.set_rules(rules)
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.create, self.context, None, '1',
+ availability_zone='1:1')
+
+ def test_force_host_pass(self):
+ rules = {"compute:create": [],
+ "compute:create:forced_host": [],
+ "network:validate_networks": []}
+ self.policy.set_rules(rules)
+
+ self.compute_api.create(self.context, None, '1',
+ availability_zone='1:1')
+
+
+class DisabledInstanceTypesTestCase(BaseTestCase):
+ """Some instance-types are marked 'disabled' which means that they will not
+ show up in customer-facing listings. We do, however, want those
+ instance-types to be available for emergency migrations and for rebuilding
+ of existing instances.
+
+ One legitimate use of the 'disabled' field would be when phasing out a
+ particular instance-type. We still want customers to be able to use an
+ instance of the old type, and we want Ops to be able to perform
+ migrations against it, but we *don't* want customers building new
+ instances with the phased-out instance-type.
+ """
+ def setUp(self):
+ super(DisabledInstanceTypesTestCase, self).setUp()
+ self.compute_api = compute.API()
+ self.inst_type = flavors.get_default_flavor()
+
+ def test_can_build_instance_from_visible_instance_type(self):
+ self.inst_type['disabled'] = False
+ # Assert that exception.FlavorNotFound is not raised
+ self.compute_api.create(self.context, self.inst_type,
+ image_href='some-fake-image')
+
+ def test_cannot_build_instance_from_disabled_instance_type(self):
+ self.inst_type['disabled'] = True
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.create, self.context, self.inst_type, None)
+
+ def test_can_resize_to_visible_instance_type(self):
+ instance = self._create_fake_instance_obj()
+        orig_get_flavor_by_flavor_id = flavors.get_flavor_by_flavor_id
+
+ def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
+ read_deleted="yes"):
+ instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+ ctxt,
+ read_deleted)
+ instance_type['disabled'] = False
+ return instance_type
+
+ self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+
+ self._stub_migrate_server()
+ self.compute_api.resize(self.context, instance, '4')
+
+ def test_cannot_resize_to_disabled_instance_type(self):
+ instance = self._create_fake_instance_obj()
+        orig_get_flavor_by_flavor_id = flavors.get_flavor_by_flavor_id
+
+ def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
+ read_deleted="yes"):
+ instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+ ctxt,
+ read_deleted)
+ instance_type['disabled'] = True
+ return instance_type
+
+ self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context, instance, '4')
+
+
+class ComputeReschedulingTestCase(BaseTestCase):
+ """Tests re-scheduling logic for new build requests."""
+
+ def setUp(self):
+ super(ComputeReschedulingTestCase, self).setUp()
+
+ self.expected_task_state = task_states.SCHEDULING
+
+ def fake_update(*args, **kwargs):
+ self.updated_task_state = kwargs.get('task_state')
+ self.stubs.Set(self.compute, '_instance_update', fake_update)
+
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
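+        # Build the argument tuple the run_instance path would hand to
+        # _reschedule and invoke it directly; returns _reschedule's result
+        # (False whenever re-scheduling is not possible).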
+ if not filter_properties:
+ filter_properties = {}
+
+ instance = self._create_fake_instance_obj()
+
+ admin_password = None
+ injected_files = None
+ requested_networks = None
+ is_first_time = False
+
+ scheduler_method = self.compute.scheduler_rpcapi.run_instance
+ method_args = (request_spec, admin_password, injected_files,
+ requested_networks, is_first_time, filter_properties)
+ return self.compute._reschedule(self.context, request_spec,
+ filter_properties, instance, scheduler_method,
+ method_args, self.expected_task_state, exc_info=exc_info)
+
+ def test_reschedule_no_filter_properties(self):
+        # A missing filter_properties dict disables re-scheduling.
+ self.assertFalse(self._reschedule())
+
+ def test_reschedule_no_retry_info(self):
+        # Missing retry info also disables re-scheduling.
+ filter_properties = {}
+ self.assertFalse(self._reschedule(filter_properties=filter_properties))
+
+ def test_reschedule_no_request_spec(self):
+        # A missing request spec also disables re-scheduling.
+ retry = dict(num_attempts=1)
+ filter_properties = dict(retry=retry)
+ self.assertFalse(self._reschedule(filter_properties=filter_properties))
+
+ def test_reschedule_success(self):
+ retry = dict(num_attempts=1)
+ filter_properties = dict(retry=retry)
+ request_spec = {'instance_uuids': ['foo', 'bar']}
+ try:
+ raise test.TestingException("just need an exception")
+ except test.TestingException:
+ exc_info = sys.exc_info()
+ exc_str = traceback.format_exception_only(exc_info[0],
+ exc_info[1])
+
+ self.assertTrue(self._reschedule(filter_properties=filter_properties,
+ request_spec=request_spec, exc_info=exc_info))
+ self.assertEqual(1, len(request_spec['instance_uuids']))
+ self.assertEqual(self.updated_task_state, self.expected_task_state)
+ self.assertEqual(exc_str, filter_properties['retry']['exc'])
+
+
+class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
+ """Test re-scheduling logic for prep_resize requests."""
+
+ def setUp(self):
+ super(ComputeReschedulingResizeTestCase, self).setUp()
+ self.expected_task_state = task_states.RESIZE_PREP
+
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
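+        # Same idea as the parent helper, but exercises the resize path:
+        # the scheduler method is compute_task_api.resize_instance and the
+        # expected task state is RESIZE_PREP.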
+ if not filter_properties:
+ filter_properties = {}
+
+ instance_uuid = str(uuid.uuid4())
+ instance = self._create_fake_instance_obj(
+ params={'uuid': instance_uuid})
+ instance_type = {}
+ reservations = None
+
+ scheduler_method = self.compute.compute_task_api.resize_instance
+ scheduler_hint = dict(filter_properties=filter_properties)
+ method_args = (instance, None, scheduler_hint, instance_type,
+ reservations)
+
+ return self.compute._reschedule(self.context, request_spec,
+ filter_properties, instance, scheduler_method,
+ method_args, self.expected_task_state, exc_info=exc_info)
+
+
+class InnerTestingException(Exception):
+ pass
+
+
+class ComputeRescheduleOrErrorTestCase(BaseTestCase):
+ """Test logic and exception handling around rescheduling or re-raising
+ original exceptions when builds fail.
+ """
+
+ def setUp(self):
+ super(ComputeRescheduleOrErrorTestCase, self).setUp()
+ self.instance = self._create_fake_instance_obj()
+
+ def test_reschedule_or_error_called(self):
+ """Basic sanity check to make sure _reschedule_or_error is called
+ when a build fails.
+ """
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute, '_spawn')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+ bdms = block_device_obj.block_device_make_list(self.context, [])
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms)
+ self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+ [], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
+ test.TestingException("BuildError"))
+ self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg(), None, None, None,
+ False, None, {}, bdms, False).AndReturn(True)
+
+ self.mox.ReplayAll()
+ self.compute._run_instance(self.context, None, {}, None, None, None,
+ False, None, self.instance, False)
+
+ def test_shutdown_instance_fail(self):
+ """Test shutdown instance failing before re-scheduling logic can even
+ run.
+ """
+ instance_uuid = self.instance['uuid']
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+
+ compute_utils.add_instance_fault_from_exc(self.context,
+ self.instance, exc_info[0], exc_info=exc_info)
+ self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
+ self.compute._log_original_error(exc_info, instance_uuid)
+
+ self.mox.ReplayAll()
+
+ # should raise the deallocation exception, not the original build
+ # error:
+ self.assertRaises(InnerTestingException,
+ self.compute._reschedule_or_error, self.context,
+ self.instance, exc_info, None, None, None, False, None, {})
+
+ def test_shutdown_instance_fail_instance_info_cache_not_found(self):
+ # Covers the case that _shutdown_instance fails with an
+ # InstanceInfoCacheNotFound exception when getting instance network
+ # information prior to calling driver.destroy.
+ elevated_context = self.context.elevated()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=self.instance['uuid'])
+ with contextlib.nested(
+ mock.patch.object(self.context, 'elevated',
+ return_value=elevated_context),
+ mock.patch.object(self.compute, '_get_instance_nw_info',
+ side_effect=error),
+ mock.patch.object(self.compute,
+ '_get_instance_block_device_info'),
+ mock.patch.object(self.compute.driver, 'destroy'),
+ mock.patch.object(self.compute, '_try_deallocate_network')
+ ) as (
+ elevated_mock,
+ _get_instance_nw_info_mock,
+ _get_instance_block_device_info_mock,
+ destroy_mock,
+ _try_deallocate_network_mock
+ ):
+ inst_obj = self.instance
+ self.compute._shutdown_instance(self.context, inst_obj,
+ bdms=[], notify=False)
+ # By asserting that _try_deallocate_network_mock was called
+ # exactly once, we know that _get_instance_nw_info raising
+ # InstanceInfoCacheNotFound did not make _shutdown_instance error
+ # out and driver.destroy was still called.
+ _try_deallocate_network_mock.assert_called_once_with(
+ elevated_context, inst_obj, None)
+
+ def test_reschedule_fail(self):
+ # Test handling of exception from _reschedule.
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+
+ instance_uuid = self.instance['uuid']
+ method_args = (None, None, None, None, False, {})
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+ self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+ mox.IgnoreArg())
+ self.compute._reschedule(self.context, None, self.instance,
+ {}, self.compute.scheduler_rpcapi.run_instance,
+ method_args, task_states.SCHEDULING, exc_info).AndRaise(
+ InnerTestingException("Inner"))
+
+ self.mox.ReplayAll()
+
+ self.assertFalse(self.compute._reschedule_or_error(self.context,
+ self.instance, exc_info, None, None, None, False, None, {}))
+
+ def test_reschedule_false(self):
+        # Test the not-rescheduled case where no nested exception is raised.
+ instance_uuid = self.instance['uuid']
+ method_args = (None, None, None, None, False, {})
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+ try:
+ raise test.TestingException("Original")
+ except test.TestingException:
+ exc_info = sys.exc_info()
+ compute_utils.add_instance_fault_from_exc(self.context,
+ self.instance, exc_info[0], exc_info=exc_info)
+
+ self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+ mox.IgnoreArg())
+ self.compute._reschedule(self.context, None, {}, self.instance,
+ self.compute.scheduler_rpcapi.run_instance, method_args,
+ task_states.SCHEDULING, exc_info).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+        # re-scheduling is False, so _reschedule_or_error returns False
+        # and the caller is left to re-raise the original build error:
+ self.assertFalse(self.compute._reschedule_or_error(self.context,
+ self.instance, exc_info, None, None, None, False, None, {}))
+
+ def test_reschedule_true(self):
+ # Test behavior when re-scheduling happens.
+ instance_uuid = self.instance['uuid']
+ method_args = (None, None, None, None, False, {})
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+
+ compute_utils.add_instance_fault_from_exc(self.context,
+ self.instance, exc_info[0], exc_info=exc_info)
+ self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+ mox.IgnoreArg())
+ self.compute._reschedule(self.context, None, {}, self.instance,
+ self.compute.scheduler_rpcapi.run_instance,
+ method_args, task_states.SCHEDULING, exc_info).AndReturn(
+ True)
+ self.compute._log_original_error(exc_info, instance_uuid)
+
+ self.mox.ReplayAll()
+
+ # re-scheduling is True, original error is logged, but nothing
+ # is raised:
+ self.compute._reschedule_or_error(self.context, self.instance,
+ exc_info, None, None, None, False, None, {})
+
+ def test_no_reschedule_on_delete_during_spawn(self):
+ # instance should not be rescheduled if instance is deleted
+ # during the build
+ self.mox.StubOutWithMock(self.compute, '_spawn')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+ exc = exception.UnexpectedDeletingTaskStateError(
+ expected=task_states.SPAWNING, actual=task_states.DELETING)
+ self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
+
+ self.mox.ReplayAll()
+ # test succeeds if mocked method '_reschedule_or_error' is not
+ # called.
+ self.compute._run_instance(self.context, None, {}, None, None, None,
+ False, None, self.instance, False)
+
+ def test_no_reschedule_on_unexpected_task_state(self):
+ # instance shouldn't be rescheduled if unexpected task state arises.
+ # the exception should get reraised.
+ self.mox.StubOutWithMock(self.compute, '_spawn')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+ exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
+ actual=task_states.SCHEDULING)
+ self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.UnexpectedTaskStateError,
+ self.compute._run_instance, self.context, None, {}, None, None,
+ None, False, None, self.instance, False)
+
+ def test_no_reschedule_on_block_device_fail(self):
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+ exc = exception.InvalidBDM()
+
+ self.compute._prep_block_device(mox.IgnoreArg(), self.instance,
+ mox.IgnoreArg()).AndRaise(exc)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidBDM, self.compute._run_instance,
+ self.context, None, {}, None, None, None, False,
+ None, self.instance, False)
+
+
+class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
+ """Test logic and exception handling around rescheduling prep resize
+ requests
+ """
+ def setUp(self):
+ super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
+ self.instance = self._create_fake_instance()
+ self.instance_uuid = self.instance['uuid']
+ self.instance_type = flavors.get_flavor_by_name(
+ "m1.tiny")
+
+ def test_reschedule_resize_or_reraise_called(self):
+ """Verify the rescheduling logic gets called when there is an error
+ during prep_resize.
+ """
+ inst_obj = self._create_fake_instance_obj()
+
+ self.mox.StubOutWithMock(self.compute.db, 'migration_create')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
+
+ self.compute.db.migration_create(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
+
+ self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
+ inst_obj, mox.IgnoreArg(), self.instance_type,
+ mox.IgnoreArg(), {},
+ {})
+
+ self.mox.ReplayAll()
+
+ self.compute.prep_resize(self.context, image=None,
+ instance=inst_obj,
+ instance_type=self.instance_type,
+ reservations=[], request_spec={},
+ filter_properties={}, node=None)
+
+ def test_reschedule_fails_with_exception(self):
+ """Original exception should be raised if the _reschedule method
+ raises another exception
+ """
+ instance = self._create_fake_instance_obj()
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+ self.mox.StubOutWithMock(self.compute, "_reschedule")
+
+ self.compute._reschedule(
+ self.context, None, None, instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP).AndRaise(
+ InnerTestingException("Inner"))
+ self.mox.ReplayAll()
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+ self.assertRaises(test.TestingException,
+ self.compute._reschedule_resize_or_reraise, self.context,
+ None, instance, exc_info, self.instance_type,
+ self.none_quotas, {}, {})
+
+ def test_reschedule_false(self):
+ """Original exception should be raised if the resize is not
+ rescheduled.
+ """
+ instance = self._create_fake_instance_obj()
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+ self.mox.StubOutWithMock(self.compute, "_reschedule")
+
+ self.compute._reschedule(
+ self.context, None, None, instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP).AndReturn(False)
+ self.mox.ReplayAll()
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+ self.assertRaises(test.TestingException,
+ self.compute._reschedule_resize_or_reraise, self.context,
+ None, instance, exc_info, self.instance_type,
+ self.none_quotas, {}, {})
+
+ def test_reschedule_true(self):
+ # If rescheduled, the original resize exception should be logged.
+ instance = self._create_fake_instance_obj()
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+
+ self.mox.StubOutWithMock(self.compute, "_reschedule")
+ self.mox.StubOutWithMock(self.compute, "_log_original_error")
+ self.compute._reschedule(self.context, {}, {},
+ instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP, exc_info).AndReturn(True)
+
+ self.compute._log_original_error(exc_info, instance.uuid)
+ self.mox.ReplayAll()
+
+ self.compute._reschedule_resize_or_reraise(
+ self.context, None, instance, exc_info,
+ self.instance_type, self.none_quotas, {}, {})
+
+
+class ComputeInactiveImageTestCase(BaseTestCase):
+ def setUp(self):
+ super(ComputeInactiveImageTestCase, self).setUp()
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'deleted',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.compute_api = compute.API()
+
+ def test_create_instance_with_deleted_image(self):
+ # Make sure we can't start an instance with a deleted image.
+ inst_type = flavors.get_flavor_by_name('m1.tiny')
+ self.assertRaises(exception.ImageNotActive,
+ self.compute_api.create,
+ self.context, inst_type, 'fake-image-uuid')
+
+
+class EvacuateHostTestCase(BaseTestCase):
+ def setUp(self):
+ super(EvacuateHostTestCase, self).setUp()
+ self.inst = self._create_fake_instance_obj(
+ {'host': 'fake_host_2', 'node': 'fakenode2'})
+ self.inst.task_state = task_states.REBUILDING
+ self.inst.save()
+
+ def tearDown(self):
+ db.instance_destroy(self.context, self.inst.uuid)
+ super(EvacuateHostTestCase, self).tearDown()
+
+ def _rebuild(self, on_shared_storage=True):
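+        # Drive the evacuate path: rebuild_instance with recreate=True
+        # re-creates self.inst on this compute host, optionally assuming
+        # shared storage.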
+ def fake(cls, ctxt, instance, *args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
+
+ orig_image_ref = None
+ image_ref = None
+ injected_files = None
+ bdms = db.block_device_mapping_get_all_by_instance(self.context,
+ self.inst.uuid)
+ self.compute.rebuild_instance(
+ self.context, self.inst, orig_image_ref,
+ image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
+ on_shared_storage=on_shared_storage)
+
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host and node."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ self.assertTrue(context.is_admin)
+ self.assertEqual('fake-mini', host)
+ cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
+ return cn
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_rebuild_on_host_updated_target_node_not_found(self):
+ """Confirm evacuate scenario where compute_node isn't found."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ raise exception.NotFound(_("Host %s not found") % host)
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertIsNone(instance['node'])
+
+ def test_rebuild_with_instance_in_stopped_state(self):
+ """Confirm evacuate scenario updates vm_state to stopped
+ if instance is in stopped state
+ """
+ # Initialize the VM to stopped state
+ db.instance_update(self.context, self.inst.uuid,
+ {"vm_state": vm_states.STOPPED})
+ self.inst.vm_state = vm_states.STOPPED
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Check the vm state is reset to stopped
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario does not update host."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InvalidSharedStorage,
+ lambda: self._rebuild(on_shared_storage=False))
+
+ # Should remain on original host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], 'fake_host_2')
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+ values = {'instance_uuid': self.inst.uuid,
+ 'source_type': 'volume',
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': 'fake_volume_id'}
+
+ db.block_device_mapping_create(self.context, values)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': 'fake_volume_id'}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == 'fake_volume_id'
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+        # make sure volume detach and _prep_block_device get called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.compute._prep_block_device(mox.IsA(self.context),
+ mox.IsA(objects.Instance),
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ self.context, self.inst.uuid):
+ db.block_device_mapping_destroy(self.context, bdms['id'])
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
+ network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)
+ """
+ fake_image = {'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'}}
+
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(objects.Instance), mox.IsA(fake_image),
+ mox.IgnoreArg(), mox.IsA('newpass'),
+ network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: False)
+ self.mox.ReplayAll()
+
+ self._rebuild(on_shared_storage=False)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raises an exception."""
+ db.instance_update(self.context, self.inst.uuid,
+ {"task_state": task_states.SCHEDULING})
+ self.compute.run_instance(self.context,
+ self.inst, {}, {},
+ [], None, None, True, None, False)
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.assertRaises(exception.InstanceExists,
+ lambda: self._rebuild(on_shared_storage=True))
+
+ def test_driver_does_not_support_recreate(self):
+ with utils.temporary_mutation(self.compute.driver.capabilities,
+ supports_recreate=False):
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: True)
+ self.assertRaises(exception.InstanceRecreateNotSupported,
+ lambda: self._rebuild(on_shared_storage=True))
+
+
+class ComputeInjectedFilesTestCase(BaseTestCase):
+    # Test that injected_files are base64-decoded when running an instance.
+
+ def setUp(self):
+ super(ComputeInjectedFilesTestCase, self).setUp()
+ self.instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
+
+ def _spawn(self, context, instance, image_meta, injected_files,
+ admin_password, nw_info, block_device_info, db_api=None):
+ self.assertEqual(self.expected, injected_files)
+
+ def _test(self, injected_files, decoded_files):
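+        # Run the instance with the given injected_files; the _spawn stub
+        # above asserts they arrive decoded as decoded_files.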
+ self.expected = decoded_files
+ self.compute.run_instance(self.context, self.instance, {}, {}, [],
+ injected_files, None, True, None, False)
+
+ def test_injected_none(self):
+ # test an input of None for injected_files
+ self._test(None, [])
+
+ def test_injected_empty(self):
+ # test an input of [] for injected_files
+ self._test([], [])
+
+ def test_injected_success(self):
+ # test with valid b64 encoded content.
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', base64.b64encode('seespotrun')),
+ ]
+
+ decoded_files = [
+ ('/a/b/c', 'foobarbaz'),
+ ('/d/e/f', 'seespotrun'),
+ ]
+ self._test(injected_files, decoded_files)
+
+ def test_injected_invalid(self):
+ # test with invalid b64 encoded content
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', 'seespotrun'),
+ ]
+
+ self.assertRaises(exception.Base64Exception, self.compute.run_instance,
+ self.context, self.instance, {}, {}, [], injected_files, None,
+ True, None, False)
+
+ def test_reschedule(self):
+ # test that rescheduling is done with original encoded files
+ expected = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', base64.b64encode('seespotrun')),
+ ]
+
+ def _roe(context, instance, exc_info, requested_networks,
+ admin_password, injected_files, is_first_time, request_spec,
+ filter_properties, bdms=None, legacy_bdm_in_spec=False):
+ self.assertEqual(expected, injected_files)
+ return True
+
+ def spawn_explode(context, instance, image_meta, injected_files,
+ admin_password, nw_info, block_device_info):
+ # force reschedule logic to execute
+ raise test.TestingException(_("spawn error"))
+
+ self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
+ self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
+
+ self.compute.run_instance(self.context, self.instance, {}, {}, [],
+ expected, None, True, None, False)
+
+
+class CheckConfigDriveTestCase(test.TestCase):
+ # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should
+ # probably derive from a `test.FastTestCase` that omits DB and env
+ # handling
+ def setUp(self):
+ super(CheckConfigDriveTestCase, self).setUp()
+ self.compute_api = compute.API()
+
+ def _assertCheck(self, expected, config_drive):
+ self.assertEqual(expected,
+ self.compute_api._check_config_drive(config_drive))
+
+ def _assertInvalid(self, config_drive):
+ self.assertRaises(exception.ConfigDriveInvalidValue,
+ self.compute_api._check_config_drive,
+ config_drive)
+
+ def test_config_drive_false_values(self):
+ self._assertCheck('', None)
+ self._assertCheck('', '')
+ self._assertCheck('', 'False')
+ self._assertCheck('', 'f')
+ self._assertCheck('', '0')
+
+ def test_config_drive_true_values(self):
+ self._assertCheck(True, 'True')
+ self._assertCheck(True, 't')
+ self._assertCheck(True, '1')
+
+ def test_config_drive_bogus_values_raise(self):
+ self._assertInvalid('asd')
+ self._assertInvalid(uuidutils.generate_uuid())
+
+
+class CheckRequestedImageTestCase(test.TestCase):
+ def setUp(self):
+ super(CheckRequestedImageTestCase, self).setUp()
+ self.compute_api = compute.API()
+ self.context = context.RequestContext(
+ 'fake_user_id', 'fake_project_id')
+
+ self.instance_type = flavors.get_default_flavor()
+ self.instance_type['memory_mb'] = 64
+ self.instance_type['root_gb'] = 1
+
+ def test_no_image_specified(self):
+ self.compute_api._check_requested_image(self.context, None, None,
+ self.instance_type)
+
+ def test_image_status_must_be_active(self):
+ image = dict(id='123', status='foo')
+
+ self.assertRaises(exception.ImageNotActive,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['status'] = 'active'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_min_ram_check(self):
+ image = dict(id='123', status='active', min_ram='65')
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['min_ram'] = '64'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_min_disk_check(self):
+ image = dict(id='123', status='active', min_disk='2')
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['min_disk'] = '1'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_too_large(self):
+ image = dict(id='123', status='active', size='1073741825')
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['size'] = '1073741824'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_root_gb_zero_disables_size_check(self):
+ self.instance_type['root_gb'] = 0
+ image = dict(id='123', status='active', size='1073741825')
+
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_root_gb_zero_disables_min_disk(self):
+ self.instance_type['root_gb'] = 0
+ image = dict(id='123', status='active', min_disk='2')
+
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_config_drive_option(self):
+ image = {'id': 1, 'status': 'active'}
+ image['properties'] = {'img_config_drive': 'optional'}
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+ image['properties'] = {'img_config_drive': 'mandatory'}
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+ image['properties'] = {'img_config_drive': 'bar'}
+ self.assertRaises(exception.InvalidImageConfigDrive,
+ self.compute_api._check_requested_image,
+ self.context, image['id'], image, self.instance_type)
+
+
+class ComputeHooksTestCase(test.BaseHookTestCase):
+ def test_delete_instance_has_hook(self):
+ delete_func = compute_manager.ComputeManager._delete_instance
+ self.assert_has_hook('delete_instance', delete_func)
+
+ def test_create_instance_has_hook(self):
+ create_func = compute_api.API.create
+ self.assert_has_hook('create_instance', create_func)
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
new file mode 100644
index 0000000000..10ac29d3dd
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -0,0 +1,2635 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for compute API."""
+
+import contextlib
+import copy
+import datetime
+
+import iso8601
+import mock
+import mox
+from oslo.utils import timeutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import instance_actions
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_mode
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import quotas as quotas_obj
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_migration
+from nova.tests.unit.objects import test_service
+from nova.volume import cinder
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+SHELVED_IMAGE = 'fake-shelved-image'
+SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
+SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
+SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
+
+
+class _ComputeAPIUnitTestMixIn(object):
+ def setUp(self):
+ super(_ComputeAPIUnitTestMixIn, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def _get_vm_states(self, exclude_states=None):
+ vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
+ vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED])
+ if not exclude_states:
+ exclude_states = set()
+ return vm_state - exclude_states
+
+ def _create_flavor(self, params=None):
+ flavor = {'id': 1,
+ 'flavorid': 1,
+ 'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'vcpu_weight': None,
+ 'root_gb': 1,
+ 'ephemeral_gb': 0,
+ 'rxtx_factor': 1,
+ 'swap': 0,
+ 'deleted': 0,
+ 'disabled': False,
+ 'is_public': True,
+ }
+ if params:
+ flavor.update(params)
+ return flavor
+
+ def _create_instance_obj(self, params=None, flavor=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ if flavor is None:
+ flavor = self._create_flavor()
+
+ def make_fake_sys_meta():
+ sys_meta = params.pop("system_metadata", {})
+ for key in flavors.system_metadata_flavor_props:
+ sys_meta['instance_type_%s' % key] = flavor[key]
+ return sys_meta
+
+ now = timeutils.utcnow()
+
+ instance = objects.Instance()
+ instance.metadata = {}
+ instance.metadata.update(params.pop('metadata', {}))
+ instance.system_metadata = make_fake_sys_meta()
+ instance.system_metadata.update(params.pop('system_metadata', {}))
+ instance._context = self.context
+ instance.id = 1
+ instance.uuid = uuidutils.generate_uuid()
+ instance.cell_name = 'api!child'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = None
+ instance.image_ref = FAKE_IMAGE_REF
+ instance.reservation_id = 'r-fakeres'
+ instance.user_id = self.user_id
+ instance.project_id = self.project_id
+ instance.host = 'fake_host'
+ instance.node = NODENAME
+ instance.instance_type_id = flavor['id']
+ instance.ami_launch_index = 0
+ instance.memory_mb = 0
+ instance.vcpus = 0
+ instance.root_gb = 0
+ instance.ephemeral_gb = 0
+ instance.architecture = arch.X86_64
+ instance.os_type = 'Linux'
+ instance.locked = False
+ instance.created_at = now
+ instance.updated_at = now
+ instance.launched_at = now
+ instance.disable_terminate = False
+ instance.info_cache = objects.InstanceInfoCache()
+
+ if params:
+ instance.update(params)
+ instance.obj_reset_changes()
+ return instance
+
+ def test_create_quota_exceeded_messages(self):
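+        # When the quota reservation fails, the TooManyInstances 'req'
+        # field should report the requested range ('20-40') or a single
+        # value ('40') when min_count == max_count.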
+ image_href = "image_href"
+ image_id = 0
+ instance_type = self._create_flavor()
+
+ self.mox.StubOutWithMock(self.compute_api, "_get_image")
+ self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
+ self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
+
+ quotas = {'instances': 1, 'cores': 1, 'ram': 1}
+ usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
+ ['instances', 'cores', 'ram'])
+ headroom = dict((res, quotas[res] -
+ (usages[res]['in_use'] + usages[res]['reserved']))
+ for res in quotas.keys())
+ quota_exception = exception.OverQuota(quotas=quotas,
+ usages=usages, overs=['instances'], headroom=headroom)
+
+ for _unused in range(2):
+ self.compute_api._get_image(self.context, image_href).AndReturn(
+ (image_id, {}))
+ quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
+ quota.QUOTAS.reserve(self.context, instances=40,
+ cores=mox.IsA(int),
+ expire=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg(),
+ ram=mox.IsA(int)).AndRaise(quota_exception)
+
+ self.mox.ReplayAll()
+
+ for min_count, message in [(20, '20-40'), (40, '40')]:
+ try:
+ self.compute_api.create(self.context, instance_type,
+ "image_href", min_count=min_count,
+ max_count=40)
+ except exception.TooManyInstances as e:
+ self.assertEqual(message, e.kwargs['req'])
+ else:
+ self.fail("Exception not raised")
+
+ def test_specified_port_and_multiple_instances_neutronv2(self):
+        # Tests that if a port is specified only one instance may boot
+        # (i.e. max_count == 1), as the same port can't be shared across
+        # multiple instances.
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ min_count = 1
+ max_count = 2
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(address=address,
+ port_id=port)])
+
+ self.assertRaises(exception.MultiplePortsNotApplicable,
+ self.compute_api.create, self.context, 'fake_flavor', 'image_id',
+ min_count=min_count, max_count=max_count,
+ requested_networks=requested_networks)
+
+ def _test_specified_ip_and_multiple_instances_helper(self,
+ requested_networks):
+        # Tests that if a fixed IP is specified only one instance may boot
+        # (i.e. max_count == 1).
+ min_count = 1
+ max_count = 2
+ self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
+ self.compute_api.create, self.context, "fake_flavor", 'image_id',
+ min_count=min_count, max_count=max_count,
+ requested_networks=requested_networks)
+
+ def test_specified_ip_and_multiple_instances(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=network,
+ address=address)])
+ self._test_specified_ip_and_multiple_instances_helper(
+ requested_networks)
+
+ def test_specified_ip_and_multiple_instances_neutronv2(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=network,
+ address=address)])
+ self._test_specified_ip_and_multiple_instances_helper(
+ requested_networks)
+
+ def test_suspend(self):
+ # Ensure instance can be suspended.
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.SUSPEND)
+ rpcapi.suspend_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.suspend(self.context, instance)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertEqual(task_states.SUSPENDING,
+ instance.task_state)
+
+ def _test_suspend_fails(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.suspend,
+ self.context, instance)
+
+ def test_suspend_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+ for state in invalid_vm_states:
+ self._test_suspend_fails(state)
+
+ def test_resume(self):
+ # Ensure instance can be resumed (if suspended).
+ instance = self._create_instance_obj(
+ params=dict(vm_state=vm_states.SUSPENDED))
+ self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'resume_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.RESUME)
+ rpcapi.resume_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.resume(self.context, instance)
+ self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
+ self.assertEqual(task_states.RESUMING,
+ instance.task_state)
+
+ def test_start(self):
+ params = dict(vm_state=vm_states.STOPPED)
+ instance = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.START)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'start_instance')
+ rpcapi.start_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.start(self.context, instance)
+ self.assertEqual(task_states.POWERING_ON,
+ instance.task_state)
+
+ def test_start_invalid_state(self):
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.start,
+ self.context, instance)
+
+ def test_start_no_host(self):
+ params = dict(vm_state=vm_states.STOPPED, host='')
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.start,
+ self.context, instance)
+
+ def _test_stop(self, vm_state, force=False):
+ # Make sure 'progress' gets reset
+ params = dict(task_state=None, progress=99, vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.STOP)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'stop_instance')
+ rpcapi.stop_instance(self.context, instance, do_cast=True)
+
+ self.mox.ReplayAll()
+
+ if force:
+ self.compute_api.force_stop(self.context, instance)
+ else:
+ self.compute_api.stop(self.context, instance)
+ self.assertEqual(task_states.POWERING_OFF,
+ instance.task_state)
+ self.assertEqual(0, instance.progress)
+
+ def test_stop(self):
+ self._test_stop(vm_states.ACTIVE)
+
+ def test_stop_stopped_instance_with_bypass(self):
+ self._test_stop(vm_states.STOPPED, force=True)
+
+ def _test_stop_invalid_state(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def test_stop_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
+ vm_states.ERROR]))
+ for state in invalid_vm_states:
+ self._test_stop_invalid_state(state)
+
+ def test_stop_a_stopped_inst(self):
+ params = {'vm_state': vm_states.STOPPED}
+ instance = self._create_instance_obj(params=params)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def test_stop_no_host(self):
+ params = {'host': ''}
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
+        # Ensure the instance can be rebooted (soft or hard).
+ inst = self._create_instance_obj()
+ inst.vm_state = vm_state
+ inst.task_state = task_state
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(inst, 'save')
+ inst.save(expected_task_state=[None, task_states.REBOOTING,
+ task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED])
+ self.compute_api._record_action_start(self.context, inst,
+ instance_actions.REBOOT)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
+ rpcapi.reboot_instance(self.context, instance=inst,
+ block_device_info=None,
+ reboot_type=reboot_type)
+ self.mox.ReplayAll()
+
+ self.compute_api.reboot(self.context, inst, reboot_type)
+
+ def _test_reboot_type_fails(self, reboot_type, **updates):
+ inst = self._create_instance_obj()
+ inst.update(updates)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ self.context, inst, reboot_type)
+
+ def test_reboot_hard_active(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD')
+
+ def test_reboot_hard_error(self):
+ self._test_reboot_type(vm_states.ERROR, 'HARD')
+
+ def test_reboot_hard_rebooting(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+ task_state=task_states.REBOOTING)
+
+ def test_reboot_hard_reboot_started(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+ task_state=task_states.REBOOT_STARTED)
+
+ def test_reboot_hard_reboot_pending(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+ task_state=task_states.REBOOT_PENDING)
+
+ def test_reboot_hard_rescued(self):
+ self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
+
+ def test_reboot_hard_error_not_launched(self):
+ self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
+ launched_at=None)
+
+ def test_reboot_soft(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
+
+ def test_reboot_soft_error(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
+
+ def test_reboot_soft_paused(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
+
+ def test_reboot_soft_stopped(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
+
+ def test_reboot_soft_suspended(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
+
+ def test_reboot_soft_rebooting(self):
+ self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
+
+ def test_reboot_soft_rebooting_hard(self):
+ self._test_reboot_type_fails('SOFT',
+ task_state=task_states.REBOOTING_HARD)
+
+ def test_reboot_soft_reboot_started(self):
+ self._test_reboot_type_fails('SOFT',
+ task_state=task_states.REBOOT_STARTED)
+
+ def test_reboot_soft_reboot_pending(self):
+ self._test_reboot_type_fails('SOFT',
+ task_state=task_states.REBOOT_PENDING)
+
+ def test_reboot_soft_rescued(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
+
+ def test_reboot_soft_error_not_launched(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
+ launched_at=None)
+
+ def _test_delete_resizing_part(self, inst, deltas):
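+        # Expectations for deleting an instance that is mid-resize
+        # (RESIZE_FINISH): the old flavor is looked up so its vcpus and
+        # memory are subtracted from the quota deltas.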
+ fake_db_migration = test_migration.fake_db_migration()
+ migration = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ fake_db_migration)
+ inst.instance_type_id = migration.new_instance_type_id
+ old_flavor = {'vcpus': 1,
+ 'memory_mb': 512}
+ deltas['cores'] = -old_flavor['vcpus']
+ deltas['ram'] = -old_flavor['memory_mb']
+
+ self.mox.StubOutWithMock(objects.Migration,
+ 'get_by_instance_and_status')
+ self.mox.StubOutWithMock(flavors, 'get_flavor')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+ self.context, inst.uuid, 'post-migrating').AndReturn(migration)
+ flavors.get_flavor(migration.old_instance_type_id).AndReturn(
+ old_flavor)
+
+ def _test_delete_resized_part(self, inst):
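+        # Expectations for deleting a RESIZED instance: the finished
+        # migration is confirmed locally (cast=False) using quotas
+        # reserved from the downsize delta.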
+ migration = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(objects.Migration,
+ 'get_by_instance_and_status')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+ self.context, inst.uuid, 'finished').AndReturn(migration)
+ self.compute_api._downsize_quota_delta(self.context, inst
+ ).AndReturn('deltas')
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+ ['rsvs'])
+ self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
+ ).AndReturn(fake_quotas)
+ self.compute_api._record_action_start(
+ self.context, inst, instance_actions.CONFIRM_RESIZE)
+ self.compute_api.compute_rpcapi.confirm_resize(
+ self.context, inst, migration,
+ migration['source_compute'], fake_quotas.reservations, cast=False)
+
+ def _test_delete_shelved_part(self, inst):
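+        # Expectations for deleting a shelved instance: the shelved
+        # snapshot image gets deleted; the sentinel image ids simulate
+        # not-found, not-authorized and unexpected image API errors.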
+ image_api = self.compute_api.image_api
+ self.mox.StubOutWithMock(image_api, 'delete')
+
+ snapshot_id = inst.system_metadata.get('shelved_image_id')
+ if snapshot_id == SHELVED_IMAGE:
+ image_api.delete(self.context, snapshot_id).AndReturn(True)
+ elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+ exception.ImageNotFound(image_id=snapshot_id))
+ elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+ exception.ImageNotAuthorized(image_id=snapshot_id))
+ elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+ test.TestingException("Unexpected error"))
+
+ def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
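+        # Expectations for the local-delete path taken when the instance's
+        # host is down: clean up the info cache and network allocations,
+        # destroy the DB record and emit start/end notifications.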
+ inst.info_cache.delete()
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier, self.context, inst,
+ '%s.start' % delete_type)
+ self.context.elevated().AndReturn(self.context)
+ self.compute_api.network_api.deallocate_for_instance(
+ self.context, inst)
+ state = (delete_types.SOFT_DELETE in delete_type and
+ vm_states.SOFT_DELETED or
+ vm_states.DELETED)
+ updates.update({'vm_state': state,
+ 'task_state': None,
+ 'terminated_at': delete_time})
+ inst.save()
+
+ updates.update({'deleted_at': delete_time,
+ 'deleted': True})
+ fake_inst = fake_instance.fake_db_instance(**updates)
+ db.instance_destroy(self.context, inst.uuid,
+ constraint=None).AndReturn(fake_inst)
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier,
+ self.context, inst, '%s.end' % delete_type,
+ system_metadata=inst.system_metadata)
+
+ def _test_delete(self, delete_type, **attrs):
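+        # Drive compute_api.<delete_type>() against an instance built
+        # from **attrs and verify the resulting field updates.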
+ reservations = ['fake-resv']
+ inst = self._create_instance_obj()
+ inst.update(attrs)
+ inst._context = self.context
+ deltas = {'instances': -1,
+ 'cores': -inst.vcpus,
+ 'ram': -inst.memory_mb}
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+ tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(delete_time)
+ task_state = (delete_type == delete_types.SOFT_DELETE and
+ task_states.SOFT_DELETING or task_states.DELETING)
+ updates = {'progress': 0, 'task_state': task_state}
+ if delete_type == delete_types.SOFT_DELETE:
+ updates['deleted_at'] = delete_time
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
+ 'service_is_up')
+ self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(inst.info_cache, 'delete')
+ self.mox.StubOutWithMock(self.compute_api.network_api,
+ 'deallocate_for_instance')
+ self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ self.mox.StubOutWithMock(compute_utils,
+ 'notify_about_instance_usage')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
+
+ if (inst.vm_state in
+ (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
+ self._test_delete_shelved_part(inst)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
+ self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, inst.uuid).AndReturn([])
+ inst.save()
+ if inst.task_state == task_states.RESIZE_FINISH:
+ self._test_delete_resizing_part(inst, deltas)
+ quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
+ user_id=inst.user_id,
+ expire=mox.IgnoreArg(),
+ **deltas).AndReturn(reservations)
+
+ # NOTE(comstud): This is getting messy. But what we are wanting
+ # to test is:
+ # If cells is enabled and we're the API cell:
+ # * Cast to cells_rpcapi.<method> with reservations=None
+ # * Commit reservations
+ # Otherwise:
+ # * Check for downed host
+ # * If downed host:
+ # * Clean up instance, destroying it, sending notifications.
+ # (Tested in _test_downed_host_part())
+ # * Commit reservations
+ # * If not downed host:
+ # * Record the action start.
+ # * Cast to compute_rpcapi.<method> with the reservations
+
+ cast = True
+ commit_quotas = True
+ if self.cell_type != 'api':
+ if inst.vm_state == vm_states.RESIZED:
+ self._test_delete_resized_part(inst)
+
+ self.context.elevated().AndReturn(self.context)
+ db.service_get_by_compute_host(
+ self.context, inst.host).AndReturn(
+ test_service.fake_service)
+ self.compute_api.servicegroup_api.service_is_up(
+ mox.IsA(objects.Service)).AndReturn(
+ inst.host != 'down-host')
+
+ if inst.host == 'down-host':
+ self._test_downed_host_part(inst, updates, delete_time,
+ delete_type)
+ cast = False
+ else:
+ # Happens on the manager side
+ commit_quotas = False
+
+ if cast:
+ if self.cell_type != 'api':
+ self.compute_api._record_action_start(self.context, inst,
+ instance_actions.DELETE)
+ if commit_quotas:
+ cast_reservations = None
+ else:
+ cast_reservations = reservations
+ if delete_type == delete_types.SOFT_DELETE:
+ rpcapi.soft_delete_instance(self.context, inst,
+ reservations=cast_reservations)
+ elif delete_type in [delete_types.DELETE,
+ delete_types.FORCE_DELETE]:
+ rpcapi.terminate_instance(self.context, inst, [],
+ reservations=cast_reservations)
+
+ if commit_quotas:
+ # Local delete or when we're testing API cell.
+ quota.QUOTAS.commit(self.context, reservations,
+ project_id=inst.project_id,
+ user_id=inst.user_id)
+
+ self.mox.ReplayAll()
+
+ getattr(self.compute_api, delete_type)(self.context, inst)
+ for k, v in updates.items():
+ self.assertEqual(inst[k], v)
+
+ self.mox.UnsetStubs()
+
+ def test_delete(self):
+ self._test_delete(delete_types.DELETE)
+
+ def test_delete_if_not_launched(self):
+ self._test_delete(delete_types.DELETE, launched_at=None)
+
+ def test_delete_in_resizing(self):
+ self._test_delete(delete_types.DELETE,
+ task_state=task_states.RESIZE_FINISH)
+
+ def test_delete_in_resized(self):
+ self._test_delete(delete_types.DELETE, vm_state=vm_states.RESIZED)
+
+ def test_delete_shelved(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+ self._test_delete(delete_types.DELETE,
+ vm_state=vm_states.SHELVED,
+ system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_offloaded(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+ self._test_delete(delete_types.DELETE,
+ vm_state=vm_states.SHELVED_OFFLOADED,
+ system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_image_not_found(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
+ self._test_delete(delete_types.DELETE,
+ vm_state=vm_states.SHELVED_OFFLOADED,
+ system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_image_not_authorized(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
+ self._test_delete(delete_types.DELETE,
+ vm_state=vm_states.SHELVED_OFFLOADED,
+ system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_exception(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
+ self._test_delete(delete_types.DELETE,
+ vm_state=vm_states.SHELVED,
+ system_metadata=fake_sys_meta)
+
+ def test_delete_with_down_host(self):
+ self._test_delete(delete_types.DELETE, host='down-host')
+
+ def test_delete_soft_with_down_host(self):
+ self._test_delete(delete_types.SOFT_DELETE, host='down-host')
+
+ def test_delete_soft(self):
+ self._test_delete(delete_types.SOFT_DELETE)
+
+ def test_delete_forced(self):
+ for vm_state in self._get_vm_states():
+ self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state)
+
+ def test_delete_forced_when_task_state_deleting(self):
+ for vm_state in self._get_vm_states():
+ self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state,
+ task_state=task_states.DELETING)
+
+ def test_no_delete_when_task_state_deleting(self):
+ if self.cell_type == 'api':
+ # In the 'api' cell, terminate_instance is cast and the quota
+ # is committed before returning; the condition below is never
+ # checked, so skip this test.
+ """
+ if original_task_state in (task_states.DELETING,
+ task_states.SOFT_DELETING):
+ LOG.info(_('Instance is already in deleting state, '
+ 'ignoring this request'), instance=instance)
+ quotas.rollback()
+ return
+ """
+ self.skipTest("API cell doesn't delete instance directly.")
+
+ attrs = {}
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+
+ for vm_state in self._get_vm_states():
+ if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
+ attrs.update({'system_metadata': fake_sys_meta})
+
+ attrs.update({'vm_state': vm_state, 'task_state': 'deleting'})
+ reservations = ['fake-resv']
+ inst = self._create_instance_obj()
+ inst.update(attrs)
+ inst._context = self.context
+ deltas = {'instances': -1,
+ 'cores': -inst.vcpus,
+ 'ram': -inst.memory_mb}
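+ # Freeze the clock so timestamps taken during delete() are
+ # deterministic.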
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+ tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(delete_time)
+ bdms = []
+ migration = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ test_migration.fake_db_migration())
+
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+ ['rsvs'])
+
+ image_api = self.compute_api.image_api
+ rpcapi = self.compute_api.compute_rpcapi
+
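+ # Patch out everything delete() touches; since the instance is
+ # already in a deleting task state, the quota should be rolled back
+ # and no terminate cast should be made.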
+ with contextlib.nested(
+ mock.patch.object(image_api, 'delete'),
+ mock.patch.object(inst, 'save'),
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid',
+ return_value=bdms),
+ mock.patch.object(objects.Migration,
+ 'get_by_instance_and_status'),
+ mock.patch.object(quota.QUOTAS, 'reserve',
+ return_value=reservations),
+ mock.patch.object(self.context, 'elevated',
+ return_value=self.context),
+ mock.patch.object(db, 'service_get_by_compute_host',
+ return_value=test_service.fake_service),
+ mock.patch.object(self.compute_api.servicegroup_api,
+ 'service_is_up',
+ return_value=inst.host != 'down-host'),
+ mock.patch.object(self.compute_api,
+ '_downsize_quota_delta',
+ return_value=fake_quotas),
+ mock.patch.object(self.compute_api,
+ '_reserve_quota_delta'),
+ mock.patch.object(self.compute_api,
+ '_record_action_start'),
+ mock.patch.object(db, 'instance_update_and_get_original'),
+ mock.patch.object(inst.info_cache, 'delete'),
+ mock.patch.object(self.compute_api.network_api,
+ 'deallocate_for_instance'),
+ mock.patch.object(db, 'instance_system_metadata_get'),
+ mock.patch.object(db, 'instance_destroy'),
+ mock.patch.object(compute_utils,
+ 'notify_about_instance_usage'),
+ mock.patch.object(quota.QUOTAS, 'commit'),
+ mock.patch.object(quota.QUOTAS, 'rollback'),
+ mock.patch.object(rpcapi, 'confirm_resize'),
+ mock.patch.object(rpcapi, 'terminate_instance')
+ ) as (
+ image_delete,
+ save,
+ get_by_instance_uuid,
+ get_by_instance_and_status,
+ reserve,
+ elevated,
+ service_get_by_compute_host,
+ service_is_up,
+ _downsize_quota_delta,
+ _reserve_quota_delta,
+ _record_action_start,
+ instance_update_and_get_original,
+ delete,
+ deallocate_for_instance,
+ instance_system_metadata_get,
+ instance_destroy,
+ notify_about_instance_usage,
+ commit,
+ rollback,
+ confirm_resize,
+ terminate_instance
+ ):
+ if (inst.vm_state in (vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED)):
+ image_delete.return_value = True
+
+ if inst.vm_state == vm_states.RESIZED:
+ get_by_instance_and_status.return_value = migration
+ _downsize_quota_delta.return_value = deltas
+
+ self.compute_api.delete(self.context, inst)
+ self.assertEqual(1, rollback.call_count)
+ self.assertEqual(0, terminate_instance.call_count)
+
+ def test_delete_fast_if_host_not_set(self):
+ inst = self._create_instance_obj()
+ inst.host = ''
+ quotas = quotas_obj.Quotas(self.context)
+ updates = {'progress': 0, 'task_state': task_states.DELETING}
+
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(db,
+ 'block_device_mapping_get_all_by_instance')
+
+ self.mox.StubOutWithMock(db, 'constraint')
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
+ self.mox.StubOutWithMock(compute_utils,
+ 'notify_about_instance_usage')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
+
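+ # With no host set, the instance should be destroyed locally rather
+ # than cast to a compute host; an API cell still casts to the child
+ # cell.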
+ db.block_device_mapping_get_all_by_instance(self.context,
+ inst.uuid,
+ use_slave=False).AndReturn([])
+ inst.save()
+ self.compute_api._create_reservations(self.context,
+ inst, inst.task_state,
+ inst.project_id, inst.user_id
+ ).AndReturn(quotas)
+
+ if self.cell_type == 'api':
+ rpcapi.terminate_instance(
+ self.context, inst,
+ mox.IsA(objects.BlockDeviceMappingList),
+ reservations=None)
+ else:
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier, self.context,
+ inst, 'delete.start')
+ db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+ tzinfo=iso8601.iso8601.Utc())
+ updates['deleted_at'] = delete_time
+ updates['deleted'] = True
+ fake_inst = fake_instance.fake_db_instance(**updates)
+ db.instance_destroy(self.context, inst.uuid,
+ constraint='constraint').AndReturn(fake_inst)
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier, self.context,
+ inst, 'delete.end',
+ system_metadata=inst.system_metadata)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.delete(self.context, inst)
+ for k, v in updates.items():
+ self.assertEqual(inst[k], v)
+
+ def test_local_delete_with_deleted_volume(self):
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 42, 'volume_id': 'volume_id',
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'delete_on_termination': False}))]
+
+ def _fake_do_delete(context, instance, bdms,
+ reservations=None, local=False):
+ pass
+
+ inst = self._create_instance_obj()
+ inst._context = self.context
+
+ self.mox.StubOutWithMock(inst, 'destroy')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(inst.info_cache, 'delete')
+ self.mox.StubOutWithMock(self.compute_api.network_api,
+ 'deallocate_for_instance')
+ self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
+ self.mox.StubOutWithMock(compute_utils,
+ 'notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute_api.volume_api,
+ 'terminate_connection')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
+
+ inst.info_cache.delete()
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier, self.context,
+ inst, 'delete.start')
+ self.context.elevated().MultipleTimes().AndReturn(self.context)
+ if self.cell_type != 'api':
+ self.compute_api.network_api.deallocate_for_instance(
+ self.context, inst)
+
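+ # A volume that has already been deleted must not abort the local
+ # delete; the BDM is still destroyed and the delete completes.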
+ self.compute_api.volume_api.terminate_connection(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
+ AndRaise(exception.VolumeNotFound('volume_id'))
+ bdms[0].destroy(self.context)
+
+ inst.destroy()
+ compute_utils.notify_about_instance_usage(
+ self.compute_api.notifier, self.context,
+ inst, 'delete.end',
+ system_metadata=inst.system_metadata)
+
+ self.mox.ReplayAll()
+ self.compute_api._local_delete(self.context, inst, bdms,
+ delete_types.DELETE,
+ _fake_do_delete)
+
+ def test_delete_disabled(self):
+ inst = self._create_instance_obj()
+ inst.disable_terminate = True
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.ReplayAll()
+ self.compute_api.delete(self.context, inst)
+
+ def test_delete_soft_rollback(self):
+ inst = self._create_instance_obj()
+ self.mox.StubOutWithMock(db,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(inst, 'save')
+
+ delete_time = datetime.datetime(1955, 11, 5)
+ timeutils.set_time_override(delete_time)
+
+ db.block_device_mapping_get_all_by_instance(
+ self.context, inst.uuid, use_slave=False).AndReturn([])
+ inst.save().AndRaise(test.TestingException)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute_api.soft_delete, self.context, inst)
+
+ def _test_confirm_resize(self, mig_ref_passed=False):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+ 'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_mig, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'confirm_resize')
+
+ self.context.elevated().AndReturn(self.context)
+ if not mig_ref_passed:
+ objects.Migration.get_by_instance_and_status(
+ self.context, fake_inst['uuid'], 'finished').AndReturn(
+ fake_mig)
+ self.compute_api._downsize_quota_delta(self.context,
+ fake_inst).AndReturn('deltas')
+
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+ fake_inst).AndReturn(fake_quotas)
+
+ def _check_mig(expected_task_state=None):
+ self.assertEqual('confirming', fake_mig.status)
+
+ fake_mig.save().WithSideEffects(_check_mig)
+
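+ # When cells are enabled, the quota is committed up front and empty
+ # reservations are passed down with the confirm_resize cast.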
+ if self.cell_type:
+ fake_quotas.commit(self.context)
+
+ self.compute_api._record_action_start(self.context, fake_inst,
+ 'confirmResize')
+
+ self.compute_api.compute_rpcapi.confirm_resize(
+ self.context, fake_inst, fake_mig, 'compute-source',
+ [] if self.cell_type else fake_quotas.reservations)
+
+ self.mox.ReplayAll()
+
+ if mig_ref_passed:
+ self.compute_api.confirm_resize(self.context, fake_inst,
+ migration=fake_mig)
+ else:
+ self.compute_api.confirm_resize(self.context, fake_inst)
+
+ def test_confirm_resize(self):
+ self._test_confirm_resize()
+
+ def test_confirm_resize_with_migration_ref(self):
+ self._test_confirm_resize(mig_ref_passed=True)
+
+ def _test_revert_resize(self):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+ 'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_reverse_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+ self.mox.StubOutWithMock(fake_mig, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'revert_resize')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+ self.context, fake_inst['uuid'], 'finished').AndReturn(
+ fake_mig)
+ self.compute_api._reverse_upsize_quota_delta(
+ self.context, fake_mig).AndReturn('deltas')
+
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+ fake_inst).AndReturn(fake_quotas)
+
+ def _check_state(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_REVERTING,
+ fake_inst.task_state)
+
+ fake_inst.save(expected_task_state=[None]).WithSideEffects(
+ _check_state)
+
+ def _check_mig(expected_task_state=None):
+ self.assertEqual('reverting', fake_mig.status)
+
+ fake_mig.save().WithSideEffects(_check_mig)
+
+ if self.cell_type:
+ fake_quotas.commit(self.context)
+
+ self.compute_api._record_action_start(self.context, fake_inst,
+ 'revertResize')
+
+ self.compute_api.compute_rpcapi.revert_resize(
+ self.context, fake_inst, fake_mig, 'compute-dest',
+ [] if self.cell_type else fake_quotas.reservations)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.revert_resize(self.context, fake_inst)
+
+ def test_revert_resize(self):
+ self._test_revert_resize()
+
+ def test_revert_resize_concurrent_fail(self):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+ self.context, objects.Migration(),
+ test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+ 'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_reverse_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+ self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
+
+ delta = ['delta']
+ self.compute_api._reverse_upsize_quota_delta(
+ self.context, fake_mig).AndReturn(delta)
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+ self.compute_api._reserve_quota_delta(
+ self.context, delta, fake_inst).AndReturn(fake_quotas)
+
+ exc = exception.UnexpectedTaskStateError(
+ actual=task_states.RESIZE_REVERTING, expected=None)
+ fake_inst.save(expected_task_state=[None]).AndRaise(exc)
+
+ fake_quotas.rollback(self.context)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.UnexpectedTaskStateError,
+ self.compute_api.revert_resize,
+ self.context,
+ fake_inst)
+
+ def _test_resize(self, flavor_id_passed=True,
+ same_host=False, allow_same_host=False,
+ allow_mig_same_host=False,
+ project_id=None,
+ extra_kwargs=None,
+ same_flavor=False):
+ if extra_kwargs is None:
+ extra_kwargs = {}
+
+ self.flags(allow_resize_to_same_host=allow_same_host,
+ allow_migrate_to_same_host=allow_mig_same_host)
+
+ params = {}
+ if project_id is not None:
+ # To test instance w/ different project id than context (admin)
+ params['project_id'] = project_id
+ fake_inst = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+ 'resize_instance')
+
+ current_flavor = flavors.extract_flavor(fake_inst)
+ if flavor_id_passed:
+ new_flavor = dict(id=200, flavorid='new-flavor-id',
+ name='new_flavor', disabled=False)
+ if same_flavor:
+ cur_flavor = flavors.extract_flavor(fake_inst)
+ new_flavor['id'] = cur_flavor['id']
+ flavors.get_flavor_by_flavor_id(
+ 'new-flavor-id',
+ read_deleted='no').AndReturn(new_flavor)
+ else:
+ new_flavor = current_flavor
+
+ if (self.cell_type == 'compute' or
+ not (flavor_id_passed and same_flavor)):
+ resvs = ['resvs']
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ fake_inst)
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+ resvs)
+
+ self.compute_api._upsize_quota_delta(
+ self.context, new_flavor,
+ current_flavor).AndReturn('deltas')
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+ fake_inst).AndReturn(fake_quotas)
+
+ def _check_state(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_PREP,
+ fake_inst.task_state)
+ self.assertEqual(fake_inst.progress, 0)
+ for key, value in extra_kwargs.items():
+ self.assertEqual(value, getattr(fake_inst, key))
+
+ fake_inst.save(expected_task_state=[None]).WithSideEffects(
+ _check_state)
+
+ if allow_same_host:
+ filter_properties = {'ignore_hosts': []}
+ else:
+ filter_properties = {'ignore_hosts': [fake_inst['host']]}
+
+ if not flavor_id_passed and not allow_mig_same_host:
+ filter_properties['ignore_hosts'].append(fake_inst['host'])
+
+ expected_reservations = fake_quotas.reservations
+ if self.cell_type == 'api':
+ fake_quotas.commit(self.context)
+ expected_reservations = []
+ mig = objects.Migration()
+
+ def _get_migration():
+ return mig
+
+ def _check_mig(ctxt):
+ self.assertEqual(fake_inst.uuid, mig.instance_uuid)
+ self.assertEqual(current_flavor['id'],
+ mig.old_instance_type_id)
+ self.assertEqual(new_flavor['id'],
+ mig.new_instance_type_id)
+ self.assertEqual('finished', mig.status)
+
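+ # Swap in _get_migration so the code under test picks up our
+ # pre-built migration; _check_mig then verifies the fields it sets
+ # before create() is called.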
+ self.stubs.Set(objects, 'Migration', _get_migration)
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(mig, 'create')
+
+ self.context.elevated().AndReturn(self.context)
+ mig.create(self.context).WithSideEffects(_check_mig)
+
+ if flavor_id_passed:
+ self.compute_api._record_action_start(self.context, fake_inst,
+ 'resize')
+ else:
+ self.compute_api._record_action_start(self.context, fake_inst,
+ 'migrate')
+
+ scheduler_hint = {'filter_properties': filter_properties}
+
+ self.compute_api.compute_task_api.resize_instance(
+ self.context, fake_inst, extra_kwargs,
+ scheduler_hint=scheduler_hint,
+ flavor=new_flavor, reservations=expected_reservations)
+
+ self.mox.ReplayAll()
+
+ if flavor_id_passed:
+ self.compute_api.resize(self.context, fake_inst,
+ flavor_id='new-flavor-id',
+ **extra_kwargs)
+ else:
+ self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
+
+ def _test_migrate(self, *args, **kwargs):
+ self._test_resize(*args, flavor_id_passed=False, **kwargs)
+
+ def test_resize(self):
+ self._test_resize()
+
+ def test_resize_with_kwargs(self):
+ self._test_resize(extra_kwargs=dict(cow='moo'))
+
+ def test_resize_same_host_and_allowed(self):
+ self._test_resize(same_host=True, allow_same_host=True)
+
+ def test_resize_same_host_and_not_allowed(self):
+ self._test_resize(same_host=True, allow_same_host=False)
+
+ def test_resize_different_project_id(self):
+ self._test_resize(project_id='different')
+
+ def test_migrate(self):
+ self._test_migrate()
+
+ def test_migrate_with_kwargs(self):
+ self._test_migrate(extra_kwargs=dict(cow='moo'))
+
+ def test_migrate_same_host_and_allowed(self):
+ self._test_migrate(same_host=True, allow_same_host=True)
+
+ def test_migrate_same_host_and_not_allowed(self):
+ self._test_migrate(same_host=True, allow_same_host=False)
+
+ def test_migrate_different_project_id(self):
+ self._test_migrate(project_id='different')
+
+ def test_resize_invalid_flavor_fails(self):
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ # Should never reach these.
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+ 'resize_instance')
+
+ fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+ exc = exception.FlavorNotFound(flavor_id='flavor-id')
+
+ flavors.get_flavor_by_flavor_id('flavor-id',
+ read_deleted='no').AndRaise(exc)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context,
+ fake_inst, flavor_id='flavor-id')
+
+ def test_resize_disabled_flavor_fails(self):
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ # Should never reach these.
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+ 'resize_instance')
+
+ fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+ fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+ disabled=True)
+
+ flavors.get_flavor_by_flavor_id(
+ 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.resize, self.context,
+ fake_inst, flavor_id='flavor-id')
+
+ @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
+ def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
+ fake_inst = self._create_instance_obj()
+ fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+ root_gb=0)
+
+ get_flavor_by_flavor_id.return_value = fake_flavor
+
+ self.assertRaises(exception.CannotResizeDisk,
+ self.compute_api.resize, self.context,
+ fake_inst, flavor_id='flavor-id')
+
+ def test_resize_quota_exceeds_fails(self):
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ # Should never reach these.
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+ 'resize_instance')
+
+ fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+ current_flavor = flavors.extract_flavor(fake_inst)
+ fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+ disabled=False)
+ flavors.get_flavor_by_flavor_id(
+ 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
+ deltas = dict(resource=0)
+ self.compute_api._upsize_quota_delta(
+ self.context, fake_flavor,
+ current_flavor).AndReturn(deltas)
+ usage = dict(in_use=0, reserved=0)
+ quotas = {'resource': 0}
+ usages = {'resource': usage}
+ overs = ['resource']
+ headroom = {'resource': quotas['resource'] -
+ (usages['resource']['in_use'] + usages['resource']['reserved'])}
+ over_quota_args = dict(quotas=quotas,
+ usages=usages,
+ overs=overs,
+ headroom=headroom)
+
+ self.compute_api._reserve_quota_delta(self.context, deltas,
+ fake_inst).AndRaise(
+ exception.OverQuota(**over_quota_args))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.TooManyInstances,
+ self.compute_api.resize, self.context,
+ fake_inst, flavor_id='flavor-id')
+
+ def test_pause(self):
+ # Ensure instance can be paused.
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'pause_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.PAUSE)
+ rpcapi.pause_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.pause(self.context, instance)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertEqual(task_states.PAUSING,
+ instance.task_state)
+
+ def _test_pause_fails(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.pause,
+ self.context, instance)
+
+ def test_pause_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+ for state in invalid_vm_states:
+ self._test_pause_fails(state)
+
+ def test_unpause(self):
+ # Ensure instance can be unpaused.
+ params = dict(vm_state=vm_states.PAUSED)
+ instance = self._create_instance_obj(params=params)
+ self.assertEqual(instance.vm_state, vm_states.PAUSED)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.UNPAUSE)
+ rpcapi.unpause_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.unpause(self.context, instance)
+ self.assertEqual(vm_states.PAUSED, instance.vm_state)
+ self.assertEqual(task_states.UNPAUSING, instance.task_state)
+
+ def test_swap_volume_volume_api_usage(self):
+ # This test ensures that volume_id arguments are passed to volume_api
+ # and that volumes return to previous states in case of error.
+ def fake_vol_api_begin_detaching(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ volumes[volume_id]['status'] = 'detaching'
+
+ def fake_vol_api_roll_detaching(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'detaching':
+ volumes[volume_id]['status'] = 'in-use'
+
+ def fake_vol_api_reserve(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ self.assertEqual(volumes[volume_id]['status'], 'available')
+ volumes[volume_id]['status'] = 'attaching'
+
+ def fake_vol_api_unreserve(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'attaching':
+ volumes[volume_id]['status'] = 'available'
+
+ def fake_swap_volume_exc(context, instance, old_volume_id,
+ new_volume_id):
+ raise AttributeError # Random exception
+
+ # Should fail if VM state is not valid
+ instance = {'vm_state': vm_states.BUILDING,
+ 'launched_at': timeutils.utcnow(),
+ 'locked': False,
+ 'availability_zone': 'fake_az',
+ 'uuid': 'fake'}
+ volumes = {}
+ old_volume_id = uuidutils.generate_uuid()
+ volumes[old_volume_id] = {'id': old_volume_id,
+ 'display_name': 'old_volume',
+ 'attach_status': 'attached',
+ 'instance_uuid': 'fake',
+ 'size': 5,
+ 'status': 'in-use'}
+ new_volume_id = uuidutils.generate_uuid()
+ volumes[new_volume_id] = {'id': new_volume_id,
+ 'display_name': 'new_volume',
+ 'attach_status': 'detached',
+ 'instance_uuid': None,
+ 'size': 5,
+ 'status': 'available'}
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ instance['vm_state'] = vm_states.ACTIVE
+ instance['task_state'] = None
+
+ # Should fail if old volume is not attached
+ volumes[old_volume_id]['attach_status'] = 'detached'
+ self.assertRaises(exception.VolumeUnattached,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+ volumes[old_volume_id]['attach_status'] = 'attached'
+
+ # Should fail if old volume's instance_uuid is not that of the instance
+ volumes[old_volume_id]['instance_uuid'] = 'fake2'
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+ volumes[old_volume_id]['instance_uuid'] = 'fake'
+
+ # Should fail if new volume is attached
+ volumes[new_volume_id]['attach_status'] = 'attached'
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+ volumes[new_volume_id]['attach_status'] = 'detached'
+
+ # Should fail if new volume is smaller than the old volume
+ volumes[new_volume_id]['size'] = 4
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+ volumes[new_volume_id]['size'] = 5
+
+ # Fail call to swap_volume
+ self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
+ fake_vol_api_begin_detaching)
+ self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
+ fake_vol_api_roll_detaching)
+ self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
+ fake_vol_api_reserve)
+ self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
+ fake_vol_api_unreserve)
+ self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
+ fake_swap_volume_exc)
+ self.assertRaises(AttributeError,
+ self.compute_api.swap_volume, self.context, instance,
+ volumes[old_volume_id], volumes[new_volume_id])
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+
+ # Should succeed
+ self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
+ lambda c, instance, old_volume_id, new_volume_id: True)
+ self.compute_api.swap_volume(self.context, instance,
+ volumes[old_volume_id],
+ volumes[new_volume_id])
+
+ def _test_snapshot_and_backup(self, is_snapshot=True,
+ with_base_ref=False, min_ram=None,
+ min_disk=None,
+ create_fails=False,
+ instance_vm_state=vm_states.ACTIVE):
+ # 'cache_in_nova' exercises non-inheritable properties.
+ # 'user_id' should likewise not be carried from sys_meta into
+ # the image properties, since _create_image() in the compute
+ # API sets it explicitly.
+ fake_sys_meta = dict(image_foo='bar', blah='bug?',
+ image_cache_in_nova='dropped',
+ cache_in_nova='dropped',
+ user_id='meow')
+ if with_base_ref:
+ fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
+ params = dict(system_metadata=fake_sys_meta, locked=True)
+ instance = self._create_instance_obj(params=params)
+ instance.vm_state = instance_vm_state
+ fake_sys_meta.update(instance.system_metadata)
+ extra_props = dict(cow='moo', cat='meow')
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(self.compute_api.image_api,
+ 'create')
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'snapshot_instance')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'backup_instance')
+
+ image_type = 'snapshot' if is_snapshot else 'backup'
+
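+ # Model the system metadata the image should inherit:
+ # non-inheritable keys are dropped and the 'image_' prefix is
+ # stripped from the remaining keys.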
+ expected_sys_meta = dict(fake_sys_meta)
+ expected_sys_meta.pop('cache_in_nova')
+ expected_sys_meta.pop('image_cache_in_nova')
+ expected_sys_meta.pop('user_id')
+ expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
+ if with_base_ref:
+ expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
+ 'image_base_image_ref')
+
+ expected_props = {'instance_uuid': instance.uuid,
+ 'user_id': self.context.user_id,
+ 'image_type': image_type}
+ expected_props.update(extra_props)
+ expected_props.update(expected_sys_meta)
+ expected_meta = {'name': 'fake-name',
+ 'is_public': False,
+ 'properties': expected_props}
+ if is_snapshot:
+ if min_ram is not None:
+ expected_meta['min_ram'] = min_ram
+ if min_disk is not None:
+ expected_meta['min_disk'] = min_disk
+ else:
+ expected_props['backup_type'] = 'fake-backup-type'
+
+ compute_utils.get_image_metadata(
+ self.context, self.compute_api.image_api,
+ FAKE_IMAGE_REF, instance).AndReturn(expected_meta)
+
+ fake_image = dict(id='fake-image-id')
+ mock_method = self.compute_api.image_api.create(
+ self.context, expected_meta)
+ if create_fails:
+ mock_method.AndRaise(test.TestingException())
+ else:
+ mock_method.AndReturn(fake_image)
+
+ def check_state(expected_task_state=None):
+ expected_state = (task_states.IMAGE_SNAPSHOT_PENDING
+ if is_snapshot else task_states.IMAGE_BACKUP)
+ self.assertEqual(expected_state, instance.task_state)
+
+ if not create_fails:
+ instance.save(expected_task_state=[None]).WithSideEffects(
+ check_state)
+ if is_snapshot:
+ self.compute_api.compute_rpcapi.snapshot_instance(
+ self.context, instance, fake_image['id'])
+ else:
+ self.compute_api.compute_rpcapi.backup_instance(
+ self.context, instance, fake_image['id'],
+ 'fake-backup-type', 'fake-rotation')
+
+ self.mox.ReplayAll()
+
+ got_exc = False
+ try:
+ if is_snapshot:
+ res = self.compute_api.snapshot(self.context, instance,
+ 'fake-name',
+ extra_properties=extra_props)
+ else:
+ res = self.compute_api.backup(self.context, instance,
+ 'fake-name',
+ 'fake-backup-type',
+ 'fake-rotation',
+ extra_properties=extra_props)
+ self.assertEqual(fake_image, res)
+ except test.TestingException:
+ got_exc = True
+ self.assertEqual(create_fails, got_exc)
+ self.mox.UnsetStubs()
+
+ def test_snapshot(self):
+ self._test_snapshot_and_backup()
+
+ def test_snapshot_fails(self):
+ self._test_snapshot_and_backup(create_fails=True)
+
+ def test_snapshot_invalid_state(self):
+ instance = self._create_instance_obj()
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_SNAPSHOT
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.snapshot,
+ self.context, instance, 'fake-name')
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_BACKUP
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.snapshot,
+ self.context, instance, 'fake-name')
+ instance.vm_state = vm_states.BUILDING
+ instance.task_state = None
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.snapshot,
+ self.context, instance, 'fake-name')
+
+ def test_snapshot_with_base_image_ref(self):
+ self._test_snapshot_and_backup(with_base_ref=True)
+
+ def test_snapshot_min_ram(self):
+ self._test_snapshot_and_backup(min_ram=42)
+
+ def test_snapshot_min_disk(self):
+ self._test_snapshot_and_backup(min_disk=42)
+
+ def test_backup(self):
+ for state in [vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.PAUSED, vm_states.SUSPENDED]:
+ self._test_snapshot_and_backup(is_snapshot=False,
+ instance_vm_state=state)
+
+ def test_backup_fails(self):
+ self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
+
+ def test_backup_invalid_state(self):
+ instance = self._create_instance_obj()
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_SNAPSHOT
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.backup,
+ self.context, instance, 'fake-name',
+ 'fake', 'fake')
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_BACKUP
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.backup,
+ self.context, instance, 'fake-name',
+ 'fake', 'fake')
+ instance.vm_state = vm_states.BUILDING
+ instance.task_state = None
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.backup,
+ self.context, instance, 'fake-name',
+ 'fake', 'fake')
+
+ def test_backup_with_base_image_ref(self):
+ self._test_snapshot_and_backup(is_snapshot=False,
+ with_base_ref=True)
+
+ def test_snapshot_volume_backed(self):
+ params = dict(locked=True)
+ instance = self._create_instance_obj(params=params)
+ instance['root_device_name'] = 'vda'
+
+ instance_bdms = []
+
+ image_meta = {
+ 'id': 'fake-image-id',
+ 'properties': {'mappings': []},
+ 'status': 'fake-status',
+ 'location': 'far-away',
+ 'owner': 'fake-tenant',
+ }
+
+ expect_meta = {
+ 'name': 'test-snapshot',
+ 'properties': {'root_device_name': 'vda',
+ 'mappings': 'DONTCARE'},
+ 'size': 0,
+ 'is_public': False
+ }
+
+ def fake_get_all_by_instance(context, instance, use_slave=False):
+ return copy.deepcopy(instance_bdms)
+
+ def fake_image_create(context, image_meta, data=None):
+ self.assertThat(image_meta, matchers.DictMatches(expect_meta))
+
+ def fake_volume_get(context, volume_id):
+ return {'id': volume_id, 'display_description': ''}
+
+ def fake_volume_create_snapshot(context, volume_id, name, description):
+ return {'id': '%s-snapshot' % volume_id}
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_get_all_by_instance)
+ self.stubs.Set(self.compute_api.image_api, 'create',
+ fake_image_create)
+ self.stubs.Set(self.compute_api.volume_api, 'get',
+ fake_volume_get)
+ self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
+ fake_volume_create_snapshot)
+
+ # No block devices defined
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
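+ # Add a volume-backed root device; the resulting image metadata
+ # should reference a snapshot of that volume instead of the volume
+ # itself.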
+ bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'no_device': False, 'volume_id': '1', 'boot_index': 0,
+ 'connection_info': 'inf', 'device_name': '/dev/vda',
+ 'source_type': 'volume', 'destination_type': 'volume'})
+ instance_bdms.append(bdm)
+
+ expect_meta['properties']['bdm_v2'] = True
+ expect_meta['properties']['block_device_mapping'] = []
+ expect_meta['properties']['block_device_mapping'].append(
+ {'guest_format': None, 'boot_index': 0, 'no_device': None,
+ 'image_id': None, 'volume_id': None, 'disk_bus': None,
+ 'volume_size': None, 'source_type': 'snapshot',
+ 'device_type': None, 'snapshot_id': '1-snapshot',
+ 'destination_type': 'volume', 'delete_on_termination': None})
+
+ # All the db_only fields and the volume ones are removed
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
+ image_mappings = [{'virtual': 'ami', 'device': 'vda'},
+ {'device': 'vda', 'virtual': 'ephemeral0'},
+ {'device': 'vdb', 'virtual': 'swap'},
+ {'device': 'vdc', 'virtual': 'ephemeral1'}]
+
+ image_meta['properties']['mappings'] = image_mappings
+
+ expect_meta['properties']['mappings'] = [
+ {'virtual': 'ami', 'device': 'vda'}]
+
+ # Check that the mappings from the image properties are included
+ self.compute_api.snapshot_volume_backed(
+ self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
+ def test_volume_snapshot_create(self):
+ volume_id = '1'
+ create_info = {'id': 'eyedee'}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 123,
+ 'device_name': '/dev/sda2',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 1,
+ 'boot_index': -1})
+ fake_bdm['instance'] = fake_instance.fake_db_instance()
+ fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
+ fake_bdm = objects.BlockDeviceMapping._from_db_object(
+ self.context, objects.BlockDeviceMapping(),
+ fake_bdm, expected_attrs=['instance'])
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+ 'get_by_volume_id')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'volume_snapshot_create')
+
+ objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, volume_id,
+ expected_attrs=['instance']).AndReturn(fake_bdm)
+ self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
+ fake_bdm['instance'], volume_id, create_info)
+
+ self.mox.ReplayAll()
+
+ snapshot = self.compute_api.volume_snapshot_create(self.context,
+ volume_id, create_info)
+
+ expected_snapshot = {
+ 'snapshot': {
+ 'id': create_info['id'],
+ 'volumeId': volume_id,
+ },
+ }
+ self.assertEqual(snapshot, expected_snapshot)
+
+ def test_volume_snapshot_delete(self):
+ volume_id = '1'
+ snapshot_id = '2'
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 123,
+ 'device_name': '/dev/sda2',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 1,
+ 'boot_index': -1})
+ fake_bdm['instance'] = fake_instance.fake_db_instance()
+ fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
+ fake_bdm = objects.BlockDeviceMapping._from_db_object(
+ self.context, objects.BlockDeviceMapping(),
+ fake_bdm, expected_attrs=['instance'])
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+ 'get_by_volume_id')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+ 'volume_snapshot_delete')
+
+ objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, volume_id,
+ expected_attrs=['instance']).AndReturn(fake_bdm)
+ self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
+ fake_bdm['instance'], volume_id, snapshot_id, {})
+
+ self.mox.ReplayAll()
+
+ self.compute_api.volume_snapshot_delete(self.context, volume_id,
+ snapshot_id, {})
+
+ def _test_boot_volume_bootable(self, is_bootable=False):
+ def get_vol_data(*args, **kwargs):
+ return {'bootable': is_bootable}
+ block_device_mapping = [{
+ 'id': 1,
+ 'device_name': 'vda',
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': '1',
+ 'delete_on_termination': False,
+ }]
+
+ expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
+ 'size': 0, 'status': 'active'}
+
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ side_effect=get_vol_data):
+ if not is_bootable:
+ self.assertRaises(exception.InvalidBDMVolumeNotBootable,
+ self.compute_api._get_bdm_image_metadata,
+ self.context, block_device_mapping)
+ else:
+ meta = self.compute_api._get_bdm_image_metadata(self.context,
+ block_device_mapping)
+ self.assertEqual(expected_meta, meta)
+
+ def test_boot_volume_non_bootable(self):
+ self._test_boot_volume_bootable(False)
+
+ def test_boot_volume_bootable(self):
+ self._test_boot_volume_bootable(True)
+
+ def test_boot_volume_basic_property(self):
+ block_device_mapping = [{
+ 'id': 1,
+ 'device_name': 'vda',
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': '1',
+ 'delete_on_termination': False,
+ }]
+ fake_volume = {"volume_image_metadata":
+ {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume):
+ meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping)
+ self.assertEqual(256, meta['min_ram'])
+ self.assertEqual(128, meta['min_disk'])
+ self.assertEqual('active', meta['status'])
+ self.assertEqual('bar', meta['properties']['foo'])
+
+ def test_boot_volume_snapshot_basic_property(self):
+ block_device_mapping = [{
+ 'id': 1,
+ 'device_name': 'vda',
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': '2',
+ 'volume_id': None,
+ 'delete_on_termination': False,
+ }]
+ fake_volume = {"volume_image_metadata":
+ {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
+ fake_snapshot = {"volume_id": "1"}
+ with contextlib.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume),
+ mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
+ return_value=fake_snapshot)) as (
+ volume_get, volume_get_snapshot):
+ meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping)
+ self.assertEqual(256, meta['min_ram'])
+ self.assertEqual(128, meta['min_disk'])
+ self.assertEqual('active', meta['status'])
+ self.assertEqual('bar', meta['properties']['foo'])
+ volume_get_snapshot.assert_called_once_with(self.context,
+ block_device_mapping[0]['snapshot_id'])
+ volume_get.assert_called_once_with(self.context,
+ fake_snapshot['volume_id'])
+
+ def _create_instance_with_disabled_disk_config(self, object=False):
+ sys_meta = {"image_auto_disk_config": "Disabled"}
+ params = {"system_metadata": sys_meta}
+ instance = self._create_instance_obj(params=params)
+ if object:
+ return instance
+ return obj_base.obj_to_primitive(instance)
+
+ def _setup_fake_image_with_disabled_disk_config(self):
+ self.fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {"auto_disk_config": "Disabled"},
+ }
+
+ def fake_show(obj, context, image_id, **kwargs):
+ return self.fake_image
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ return self.fake_image['id']
+
+ def test_resize_with_disabled_auto_disk_config_fails(self):
+ fake_inst = self._create_instance_with_disabled_disk_config()
+
+ self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+ self.compute_api.resize,
+ self.context, fake_inst,
+ auto_disk_config=True)
+
+ def test_create_with_disabled_auto_disk_config_fails(self):
+ image_id = self._setup_fake_image_with_disabled_disk_config()
+
+ self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+ self.compute_api.create, self.context,
+ "fake_flavor", image_id, auto_disk_config=True)
+
+ def test_rebuild_with_disabled_auto_disk_config_fails(self):
+ fake_inst = self._create_instance_with_disabled_disk_config(
+ object=True)
+ image_id = self._setup_fake_image_with_disabled_disk_config()
+ self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+ self.compute_api.rebuild,
+ self.context,
+ fake_inst,
+ image_id,
+ "new password",
+ auto_disk_config=True)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image_href = ''
+ image = {"min_ram": 10, "min_disk": 1,
+ "properties": {'architecture': arch.X86_64}}
+ admin_pass = ''
+ files_to_inject = []
+ bdms = []
+
+ _get_image.return_value = (None, image)
+ bdm_get_by_instance_uuid.return_value = bdms
+
+ with mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ self.compute_api.rebuild(self.context, instance, image_href,
+ admin_pass, files_to_inject)
+
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ injected_files=files_to_inject, image_ref=image_href,
+ orig_image_ref=image_href,
+ orig_sys_metadata=orig_system_metadata, bdms=bdms,
+ preserve_ephemeral=False, host=instance.host, kwargs={})
+
+ _check_auto_disk_config.assert_called_once_with(image=image)
+ _checks_for_create_and_rebuild.assert_called_once_with(self.context,
+ None, image, flavor, {}, [])
+ self.assertNotEqual(orig_system_metadata, instance.system_metadata)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_change_image(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+ orig_system_metadata = {}
+ get_flavor.return_value = test_flavor.fake_flavor
+ orig_image_href = 'orig_image'
+ orig_image = {"min_ram": 10, "min_disk": 1,
+ "properties": {'architecture': arch.X86_64,
+ 'vm_mode': 'hvm'}}
+ new_image_href = 'new_image'
+ new_image = {"min_ram": 10, "min_disk": 1,
+ "properties": {'architecture': arch.X86_64,
+ 'vm_mode': 'xen'}}
+ admin_pass = ''
+ files_to_inject = []
+ bdms = []
+
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'],
+ image_ref=orig_image_href,
+ vm_mode=vm_mode.HVM)
+ flavor = instance.get_flavor()
+
+ def get_image(context, image_href):
+ if image_href == new_image_href:
+ return (None, new_image)
+ if image_href == orig_image_href:
+ return (None, orig_image)
+ _get_image.side_effect = get_image
+ bdm_get_by_instance_uuid.return_value = bdms
+
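+ # Rebuilding to a different image should also refresh image-derived
+ # instance properties such as vm_mode.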
+ with mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ self.compute_api.rebuild(self.context, instance, new_image_href,
+ admin_pass, files_to_inject)
+
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ injected_files=files_to_inject, image_ref=new_image_href,
+ orig_image_ref=orig_image_href,
+ orig_sys_metadata=orig_system_metadata, bdms=bdms,
+ preserve_ephemeral=False, host=instance.host, kwargs={})
+
+ _check_auto_disk_config.assert_called_once_with(image=new_image)
+ _checks_for_create_and_rebuild.assert_called_once_with(self.context,
+ None, new_image, flavor, {}, [])
+ self.assertEqual(vm_mode.XEN, instance.vm_mode)
+
+ def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
+ side_effect):
+ injected_files = [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "foo"
+ }
+ ]
+ with mock.patch.object(quota.QUOTAS, 'limit_check',
+ side_effect=side_effect):
+ self.compute_api._check_injected_file_quota(
+ self.context, injected_files)
+
+ def test_check_injected_file_quota_onset_file_limit_exceeded(self):
+ # This is the first call to limit_check.
+ side_effect = exception.OverQuota(overs='injected_files')
+ self.assertRaises(exception.OnsetFileLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ def test_check_injected_file_quota_onset_file_path_limit(self):
+ # This is the second call to limit_check.
+ side_effect = (mock.DEFAULT,
+ exception.OverQuota(overs='injected_file_path_bytes'))
+ self.assertRaises(exception.OnsetFilePathLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ def test_check_injected_file_quota_onset_file_content_limit(self):
+ # This is the second call to limit_check but with different overs.
+ side_effect = (mock.DEFAULT,
+ exception.OverQuota(overs='injected_file_content_bytes'))
+ self.assertRaises(exception.OnsetFileContentLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ @mock.patch('nova.objects.Quotas.commit')
+ @mock.patch('nova.objects.Quotas.reserve')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.objects.InstanceAction.action_start')
+ def test_restore(self, action_start, instance_save, quota_reserve,
+ quota_commit):
+ instance = self._create_instance_obj()
+ instance.vm_state = vm_states.SOFT_DELETED
+ instance.task_state = None
+ instance.save()
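+ # restore() should reserve quota for the revived instance, cast
+ # restore_instance, and commit the reservation.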
+ with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
+ self.compute_api.restore(self.context, instance)
+ rpc.restore_instance.assert_called_once_with(self.context,
+ instance)
+ self.assertEqual(instance.task_state, task_states.RESTORING)
+ self.assertEqual(1, quota_commit.call_count)
+
+ def test_external_instance_event(self):
+ instances = [
+ objects.Instance(uuid='uuid1', host='host1'),
+ objects.Instance(uuid='uuid2', host='host1'),
+ objects.Instance(uuid='uuid3', host='host2'),
+ ]
+ events = [
+ objects.InstanceExternalEvent(instance_uuid='uuid1'),
+ objects.InstanceExternalEvent(instance_uuid='uuid2'),
+ objects.InstanceExternalEvent(instance_uuid='uuid3'),
+ ]
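+ # Events should be batched by instance host: uuid1/uuid2 live on
+ # host1, uuid3 on host2, so we expect exactly two casts.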
+ self.compute_api.compute_rpcapi = mock.MagicMock()
+ self.compute_api.external_instance_event(self.context,
+ instances, events)
+ method = self.compute_api.compute_rpcapi.external_instance_event
+ method.assert_any_call(self.context, instances[0:2], events[0:2])
+ method.assert_any_call(self.context, instances[2:], events[2:])
+ self.assertEqual(2, method.call_count)
+
+ def test_volume_ops_invalid_task_state(self):
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ instance.task_state = 'Any'
+ volume_id = uuidutils.generate_uuid()
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_volume,
+ self.context, instance, volume_id)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_volume,
+ self.context, instance, volume_id)
+
+ new_volume_id = uuidutils.generate_uuid()
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.swap_volume,
+ self.context, instance,
+ volume_id, new_volume_id)
+
+ @mock.patch.object(cinder.API, 'get',
+ side_effect=exception.CinderConnectionFailed(reason='error'))
+ def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'volume_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ }))]
+ self.assertRaises(exception.CinderConnectionFailed,
+ self.compute_api._get_bdm_image_metadata,
+ self.context,
+ bdms, legacy_bdm=True)
+
+ @mock.patch.object(cinder.API, 'get')
+ @mock.patch.object(cinder.API, 'check_attach',
+ side_effect=exception.InvalidVolume(reason='error'))
+ def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
+ # Tests that an InvalidVolume exception raised from
+ # volume_api.check_attach due to the volume status not being
+ # 'available' results in _validate_bdm re-raising InvalidVolume.
+ instance = self._create_instance_obj()
+ instance_type = self._create_flavor()
+ volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
+ volume_info = {'status': 'error',
+ 'attach_status': 'detached',
+ 'id': volume_id}
+ mock_get.return_value = volume_info
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'boot_index': 0,
+ 'volume_id': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ }))]
+
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api._validate_bdm,
+ self.context,
+ instance, instance_type, bdms)
+
+ mock_get.assert_called_once_with(self.context, volume_id)
+ mock_check_attach.assert_called_once_with(
+ self.context, volume_info, instance=instance)
+
+ @mock.patch.object(cinder.API, 'get_snapshot',
+ side_effect=exception.CinderConnectionFailed(reason='error'))
+ @mock.patch.object(cinder.API, 'get',
+ side_effect=exception.CinderConnectionFailed(reason='error'))
+ def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
+ instance = self._create_instance_obj()
+ instance_type = self._create_flavor()
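+ # One mapping references a volume and the other a snapshot; both
+ # Cinder lookups fail, and each failure should propagate out of
+ # _validate_bdm.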
+ bdm = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'volume_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ }))]
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'snapshot_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ }))]
+ self.assertRaises(exception.CinderConnectionFailed,
+ self.compute_api._validate_bdm,
+ self.context,
+ instance, instance_type, bdm)
+ self.assertRaises(exception.CinderConnectionFailed,
+ self.compute_api._validate_bdm,
+ self.context,
+ instance, instance_type, bdms)
+
+ def _test_create_db_entry_for_new_instance_with_cinder_error(self,
+ expected_exception):
+
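+        # The mock.patch decorators below inject their mocks bottom-up, so
+        # do_test() receives the _populate_instance_for_create mock first
+        # (mock_create) and the objects.Instance.create mock last
+        # (mock_inst_create).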
+ @mock.patch.object(objects.Instance, 'create')
+ @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
+ @mock.patch.object(compute_api.API, '_populate_instance_names')
+ @mock.patch.object(compute_api.API, '_populate_instance_for_create')
+ def do_test(self, mock_create, mock_names, mock_ensure,
+ mock_inst_create):
+ instance = self._create_instance_obj()
+ instance['display_name'] = 'FAKE_DISPLAY_NAME'
+ instance['shutdown_terminate'] = False
+ instance_type = self._create_flavor()
+ fake_image = {
+ 'id': 'fake-image-id',
+ 'properties': {'mappings': []},
+ 'status': 'fake-status',
+ 'location': 'far-away'}
+ fake_security_group = None
+ fake_num_instances = 1
+ fake_index = 1
+ bdm = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'volume_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ }))]
+ with mock.patch.object(instance, "destroy") as destroy:
+ self.assertRaises(expected_exception,
+ self.compute_api.
+ create_db_entry_for_new_instance,
+ self.context,
+ instance_type,
+ fake_image,
+ instance,
+ fake_security_group,
+ bdm,
+ fake_num_instances,
+ fake_index)
+ destroy.assert_called_once_with(self.context)
+
+ # We use a nested method so we can decorate with the mocks.
+ do_test(self)
+
+ @mock.patch.object(cinder.API, 'get',
+ side_effect=exception.CinderConnectionFailed(reason='error'))
+    def test_create_db_entry_for_new_instance_with_cinder_down(
+            self, mock_get):
+ self._test_create_db_entry_for_new_instance_with_cinder_error(
+ expected_exception=exception.CinderConnectionFailed)
+
+ @mock.patch.object(cinder.API, 'get',
+ return_value={'id': 1, 'status': 'error',
+ 'attach_status': 'detached'})
+    def test_create_db_entry_for_new_instance_with_error_volume(
+            self, mock_get):
+ self._test_create_db_entry_for_new_instance_with_cinder_error(
+ expected_exception=exception.InvalidVolume)
+
+ def _test_rescue(self, vm_state):
+ instance = self._create_instance_obj(params={'vm_state': vm_state})
+ bdms = []
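+        # contextlib.nested yields the entered mocks in the order they are
+        # listed, which the 'as (...)' unpacking below relies on.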
+ with contextlib.nested(
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value=bdms),
+ mock.patch.object(self.compute_api, 'is_volume_backed_instance',
+ return_value=False),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
+ record_action_start, rpcapi_rescue_instance
+ ):
+ self.compute_api.rescue(self.context, instance)
+ # assert field values set on the instance object
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+ # assert our mock calls
+ bdm_get_by_instance_uuid.assert_called_once_with(
+ self.context, instance.uuid)
+ volume_backed_inst.assert_called_once_with(
+ self.context, instance, bdms)
+ instance_save.assert_called_once_with(expected_task_state=[None])
+ record_action_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ rpcapi_rescue_instance.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=None)
+
+ def test_rescue_active(self):
+ self._test_rescue(vm_state=vm_states.ACTIVE)
+
+ def test_rescue_stopped(self):
+ self._test_rescue(vm_state=vm_states.STOPPED)
+
+ def test_rescue_error(self):
+ self._test_rescue(vm_state=vm_states.ERROR)
+
+ def test_unrescue(self):
+ instance = self._create_instance_obj(
+ params={'vm_state': vm_states.RESCUED})
+ with contextlib.nested(
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'unrescue_instance')
+ ) as (
+ instance_save, record_action_start, rpcapi_unrescue_instance
+ ):
+ self.compute_api.unrescue(self.context, instance)
+ # assert field values set on the instance object
+ self.assertEqual(task_states.UNRESCUING, instance.task_state)
+ # assert our mock calls
+ instance_save.assert_called_once_with(expected_task_state=[None])
+ record_action_start.assert_called_once_with(
+ self.context, instance, instance_actions.UNRESCUE)
+ rpcapi_unrescue_instance.assert_called_once_with(
+ self.context, instance=instance)
+
+ def test_set_admin_password_invalid_state(self):
+ # Tests that InstanceInvalidState is raised when not ACTIVE.
+ instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.set_admin_password,
+ self.context, instance)
+
+ def test_set_admin_password(self):
+ # Ensure instance can have its admin password set.
+ instance = self._create_instance_obj()
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(self.compute_api, '_record_action_start')
+ @mock.patch.object(self.compute_api.compute_rpcapi,
+ 'set_admin_password')
+ def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
+ # call the API
+ self.compute_api.set_admin_password(self.context, instance)
+ # make our assertions
+ instance_save_mock.assert_called_once_with(
+ expected_task_state=[None])
+ record_mock.assert_called_once_with(
+ self.context, instance, instance_actions.CHANGE_PASSWORD)
+ compute_rpcapi_mock.assert_called_once_with(
+ self.context, instance=instance, new_pass=None)
+
+ do_test()
+
+ def _test_attach_interface_invalid_state(self, state):
+ instance = self._create_instance_obj(
+ params={'vm_state': state})
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_interface,
+ self.context, instance, '', '', '', [])
+
+ def test_attach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_attach_interface_invalid_state(state)
+
+ def _test_detach_interface_invalid_state(self, state):
+ instance = self._create_instance_obj(
+ params={'vm_state': state})
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_interface,
+ self.context, instance, '', '', '', [])
+
+ def test_detach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_detach_interface_invalid_state(state)
+
+
+class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeAPIUnitTestCase, self).setUp()
+ self.compute_api = compute_api.API()
+ self.cell_type = None
+
+ def test_resize_same_flavor_fails(self):
+ self.assertRaises(exception.CannotResizeToSameFlavor,
+ self._test_resize, same_flavor=True)
+
+
+class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
+ test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeAPIAPICellUnitTestCase, self).setUp()
+ self.flags(cell_type='api', enable=True, group='cells')
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.cell_type = 'api'
+
+ def test_resize_same_flavor_fails(self):
+ self.assertRaises(exception.CannotResizeToSameFlavor,
+ self._test_resize, same_flavor=True)
+
+
+class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
+ test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeAPIComputeCellUnitTestCase, self).setUp()
+ self.flags(cell_type='compute', enable=True, group='cells')
+ self.compute_api = compute_api.API()
+ self.cell_type = 'compute'
+
+ def test_resize_same_flavor_passes(self):
+ self._test_resize(same_flavor=True)
+
+
+class DiffDictTestCase(test.NoDBTestCase):
+ """Unit tests for _diff_dict()."""
+
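+    # _diff_dict() reports added or changed keys as {key: ['+', new_value]}
+    # and removed keys as {key: ['-']}; an empty dict means no differences.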
+ def test_no_change(self):
+ old = dict(a=1, b=2, c=3)
+ new = dict(a=1, b=2, c=3)
+ diff = compute_api._diff_dict(old, new)
+
+ self.assertEqual(diff, {})
+
+ def test_new_key(self):
+ old = dict(a=1, b=2, c=3)
+ new = dict(a=1, b=2, c=3, d=4)
+ diff = compute_api._diff_dict(old, new)
+
+ self.assertEqual(diff, dict(d=['+', 4]))
+
+ def test_changed_key(self):
+ old = dict(a=1, b=2, c=3)
+ new = dict(a=1, b=4, c=3)
+ diff = compute_api._diff_dict(old, new)
+
+ self.assertEqual(diff, dict(b=['+', 4]))
+
+ def test_removed_key(self):
+ old = dict(a=1, b=2, c=3)
+ new = dict(a=1, c=3)
+ diff = compute_api._diff_dict(old, new)
+
+ self.assertEqual(diff, dict(b=['-']))
+
+
+class SecurityGroupAPITest(test.NoDBTestCase):
+ def setUp(self):
+ super(SecurityGroupAPITest, self).setUp()
+ self.secgroup_api = compute_api.SecurityGroupAPI()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ @mock.patch('nova.objects.security_group.SecurityGroupList.'
+ 'get_by_instance')
+ def test_get_instance_security_groups(self, mock_get):
+ groups = objects.SecurityGroupList()
+ groups.objects = [objects.SecurityGroup(name='foo'),
+ objects.SecurityGroup(name='bar')]
+ mock_get.return_value = groups
+ names = self.secgroup_api.get_instance_security_groups(self.context,
+ 'fake-uuid')
+ self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
+ self.assertEqual(1, mock_get.call_count)
+ self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
diff --git a/nova/tests/unit/compute/test_compute_cells.py b/nova/tests/unit/compute/test_compute_cells.py
new file mode 100644
index 0000000000..9908e6aad3
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_cells.py
@@ -0,0 +1,332 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute w/ Cells
+"""
+import functools
+import inspect
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.cells import manager
+from nova.compute import api as compute_api
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import objects
+from nova import quota
+from nova import test
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_instance
+
+
+ORIG_COMPUTE_API = None
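+# Saved in setUp() so the cell call/cast stubs below can delegate to the
+# original (non-cells) compute API implementation.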
+cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+
+
+def stub_call_to_cells(context, instance, method, *args, **kwargs):
+ fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ return fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
+
+
+def stub_cast_to_cells(context, instance, method, *args, **kwargs):
+ fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
+ # Use NoopQuotaDriver in child cells.
+ saved_quotas = quota.QUOTAS
+ quota.QUOTAS = quota.QuotaEngine(
+ quota_driver_class=quota.NoopQuotaDriver())
+ compute_api.QUOTAS = quota.QUOTAS
+ try:
+ fn(context, instance, *args, **kwargs)
+ finally:
+ quota.QUOTAS = saved_quotas
+ compute_api.QUOTAS = saved_quotas
+
+
+def deploy_stubs(stubs, api, original_instance=None):
+ call = stub_call_to_cells
+ cast = stub_cast_to_cells
+
+ if original_instance:
+ kwargs = dict(original_instance=original_instance)
+ call = functools.partial(stub_call_to_cells, **kwargs)
+ cast = functools.partial(stub_cast_to_cells, **kwargs)
+
+ stubs.Set(api, '_call_to_cells', call)
+ stubs.Set(api, '_cast_to_cells', cast)
+
+
+class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
+ def setUp(self):
+ super(CellsComputeAPITestCase, self).setUp()
+ global ORIG_COMPUTE_API
+ ORIG_COMPUTE_API = self.compute_api
+ self.flags(enable=True, group='cells')
+
+ def _fake_cell_read_only(*args, **kwargs):
+ return False
+
+ def _fake_validate_cell(*args, **kwargs):
+ return
+
+ def _nop_update(context, instance, **kwargs):
+ return instance
+
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.stubs.Set(self.compute_api, '_cell_read_only',
+ _fake_cell_read_only)
+ self.stubs.Set(self.compute_api, '_validate_cell',
+ _fake_validate_cell)
+
+ # NOTE(belliott) Don't update the instance state
+ # for the tests at the API layer. Let it happen after
+ # the stub cast to cells so that expected_task_states
+ # match.
+ self.stubs.Set(self.compute_api, 'update', _nop_update)
+
+ deploy_stubs(self.stubs, self.compute_api)
+
+ def tearDown(self):
+ global ORIG_COMPUTE_API
+ self.compute_api = ORIG_COMPUTE_API
+ super(CellsComputeAPITestCase, self).tearDown()
+
+ def test_instance_metadata(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_evacuate(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_error_evacuate(self):
+ self.skipTest("Test is incompatible with cells.")
+
+ def test_delete_instance_no_cell(self):
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi,
+ 'instance_delete_everywhere')
+ inst = self._create_fake_instance_obj()
+ cells_rpcapi.instance_delete_everywhere(self.context,
+ inst, delete_types.DELETE)
+ self.mox.ReplayAll()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, inst)
+
+ def test_soft_delete_instance_no_cell(self):
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi,
+ 'instance_delete_everywhere')
+ inst = self._create_fake_instance_obj()
+ cells_rpcapi.instance_delete_everywhere(self.context,
+ inst, delete_types.SOFT_DELETE)
+ self.mox.ReplayAll()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.soft_delete(self.context, inst)
+
+ def test_get_migrations(self):
+ filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
+ migrations = {'migrations': [{'id': 1234}]}
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations')
+ cells_rpcapi.get_migrations(self.context,
+ filters).AndReturn(migrations)
+ self.mox.ReplayAll()
+
+ response = self.compute_api.get_migrations(self.context, filters)
+
+ self.assertEqual(migrations, response)
+
+ @mock.patch('nova.cells.messaging._TargetedMessage')
+ def test_rebuild_sig(self, mock_msg):
+ # TODO(belliott) Cells could benefit from better testing to ensure API
+ # and manager signatures stay up to date
+
+ def wire(version):
+ # wire the rpc cast directly to the manager method to make sure
+ # the signature matches
+ cells_mgr = manager.CellsManager()
+
+ def cast(context, method, *args, **kwargs):
+ fn = getattr(cells_mgr, method)
+ fn(context, *args, **kwargs)
+
+ cells_mgr.cast = cast
+ return cells_mgr
+
+ cells_rpcapi = self.compute_api.cells_rpcapi
+ client = cells_rpcapi.client
+
+ with mock.patch.object(client, 'prepare', side_effect=wire):
+ inst = self._create_fake_instance_obj()
+ inst.cell_name = 'mycell'
+
+ cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
+ None, None, None, None,
+ recreate=False,
+ on_shared_storage=False, host='host',
+ preserve_ephemeral=True, kwargs=None)
+
+ # one targeted message should have been created
+ self.assertEqual(1, mock_msg.call_count)
+
+
+class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
+ def setUp(self):
+ super(CellsConductorAPIRPCRedirect, self).setUp()
+
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.cells_rpcapi = mock.MagicMock()
+ self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi
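+        # Replace the compute task API's cells RPC client with a MagicMock
+        # so each test can simply assert that the expected cells RPC method
+        # was invoked.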
+
+ self.context = context.RequestContext('fake', 'fake')
+
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ @mock.patch.object(compute_api.API, '_provision_instances')
+ @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
+ def test_build_instances(self, _validate, _get_image, _check_bdm,
+ _provision, _record_action_start):
+ _get_image.return_value = (None, 'fake-image')
+ _validate.return_value = ({}, 1)
+ _check_bdm.return_value = 'bdms'
+ _provision.return_value = 'instances'
+
+ self.compute_api.create(self.context, 'fake-flavor', 'fake-image')
+
+        # Subsequent tests in this class verify the cells hooking; we don't
+        # check the args here since that is covered by the compute test code.
+ self.assertTrue(self.cells_rpcapi.build_instances.called)
+
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ @mock.patch.object(compute_api.API, '_resize_cells_support')
+ @mock.patch.object(compute_api.API, '_reserve_quota_delta')
+ @mock.patch.object(compute_api.API, '_upsize_quota_delta')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(flavors, 'extract_flavor')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ def test_resize_instance(self, _check, _extract, _save, _upsize, _reserve,
+ _cells, _record):
+ _extract.return_value = {'name': 'fake', 'id': 'fake'}
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+
+ self.compute_api.resize(self.context, instance)
+ self.assertTrue(self.cells_rpcapi.resize_instance.called)
+
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ @mock.patch.object(objects.Instance, 'save')
+ def test_live_migrate_instance(self, instance_save, _record):
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+
+ self.compute_api.live_migrate(self.context, instance,
+ True, True, 'fake_dest_host')
+
+ self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_instance(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+ get_flavor.return_value = ''
+ image_href = ''
+ image = {"min_ram": 10, "min_disk": 1,
+ "properties": {'architecture': 'x86_64'}}
+ admin_pass = ''
+ files_to_inject = []
+ bdms = []
+
+ _get_image.return_value = (None, image)
+ bdm_get_by_instance_uuid.return_value = bdms
+
+ self.compute_api.rebuild(self.context, instance, image_href,
+ admin_pass, files_to_inject)
+
+ self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
+
+ def test_check_equal(self):
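+        # Every other test_* method in this class should have a matching
+        # entry in cells_compatible, so the redirect list stays in sync with
+        # the methods exercised above.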
+ task_api = self.compute_api.compute_task_api
+ tests = set()
+ for (name, value) in inspect.getmembers(self, inspect.ismethod):
+ if name.startswith('test_') and name != 'test_check_equal':
+ tests.add(name[5:])
+ if tests != set(task_api.cells_compatible):
+ self.fail("Testcases not equivalent to cells_compatible list")
+
+
+class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
+ def setUp(self):
+ super(CellsComputePolicyTestCase, self).setUp()
+ global ORIG_COMPUTE_API
+ ORIG_COMPUTE_API = self.compute_api
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ deploy_stubs(self.stubs, self.compute_api)
+
+ def tearDown(self):
+ global ORIG_COMPUTE_API
+ self.compute_api = ORIG_COMPUTE_API
+ super(CellsComputePolicyTestCase, self).tearDown()
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
new file mode 100644
index 0000000000..04b9f6bdc6
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -0,0 +1,3053 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for ComputeManager()."""
+
+import contextlib
+import time
+
+from cinderclient import exceptions as cinder_exception
+from eventlet import event as eventlet_event
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.utils import importutils
+
+from nova.compute import manager
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import api as network_api
+from nova.network import model as network_model
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.compute import fake_resource_tracker
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.objects import test_instance_info_cache
+from nova import utils
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+
+
+class ComputeManagerUnitTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeManagerUnitTestCase, self).setUp()
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_allocate_network_succeeds_after_retries(self):
+ self.flags(network_allocate_retries=8)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(time, 'sleep')
+
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'])
+
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+ final_result = 'meow'
+ dhcp_options = None
+
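+        # The retry back-off doubles after each failed attempt and is capped
+        # at 30 seconds; with network_allocate_retries=8 we expect eight
+        # failed attempts before the final successful call.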
+ expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
+
+ for sleep_time in expected_sleep_times:
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ security_groups=sec_groups,
+ dhcp_options=dhcp_options).AndRaise(
+ test.TestingException())
+ time.sleep(sleep_time)
+
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ security_groups=sec_groups,
+ dhcp_options=dhcp_options).AndReturn(final_result)
+ self.compute._instance_update(self.context, instance['uuid'],
+ system_metadata={'network_allocated': 'True'})
+
+ self.mox.ReplayAll()
+
+ res = self.compute._allocate_network_async(self.context, instance,
+ req_networks,
+ macs,
+ sec_groups,
+ is_vpn,
+ dhcp_options)
+ self.assertEqual(final_result, res)
+
+ def test_allocate_network_maintains_context(self):
+ # override tracker with a version that doesn't need the database:
+ class FakeResourceTracker(object):
+ def instance_claim(self, context, instance, limits):
+ return mox.MockAnything()
+
+ self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
+ self.mox.StubOutWithMock(self.compute, '_allocate_network')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ mox.IgnoreArg(), instance.uuid).AndReturn([])
+
+ node = 'fake_node'
+ self.compute._get_resource_tracker(node).AndReturn(
+ FakeResourceTracker())
+
+ self.admin_context = False
+
+ def fake_allocate(context, *args, **kwargs):
+ if context.is_admin:
+ self.admin_context = True
+
+ # NOTE(vish): The nice mox parameter matchers here don't work well
+ # because they raise an exception that gets wrapped by
+ # the retry exception handling, so use a side effect
+ # to keep track of whether allocate was called with admin
+ # context.
+ self.compute._allocate_network(mox.IgnoreArg(), instance,
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).WithSideEffects(fake_allocate)
+
+ self.mox.ReplayAll()
+
+ instance, nw_info = self.compute._build_instance(self.context, {}, {},
+ None, None, None, True,
+ node, instance,
+ {}, False)
+ self.assertFalse(self.admin_context,
+ "_allocate_network called with admin context")
+ self.assertEqual(vm_states.BUILDING, instance.vm_state)
+ self.assertEqual(task_states.BLOCK_DEVICE_MAPPING, instance.task_state)
+
+ def test_reschedule_maintains_context(self):
+ # override tracker with a version that causes a reschedule
+ class FakeResourceTracker(object):
+ def instance_claim(self, context, instance, limits):
+ raise test.TestingException()
+
+ self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
+ self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ mox.IgnoreArg(), instance.uuid).AndReturn([])
+
+ node = 'fake_node'
+ self.compute._get_resource_tracker(node).AndReturn(
+ FakeResourceTracker())
+
+ self.admin_context = False
+
+ def fake_retry_or_error(context, *args, **kwargs):
+ if context.is_admin:
+ self.admin_context = True
+
+        # NOTE(vish): we could use a mox parameter matcher here but it leads
+ # to a very cryptic error message, so use the same method
+ # as the allocate_network_maintains_context test.
+ self.compute._reschedule_or_error(mox.IgnoreArg(), instance,
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).WithSideEffects(fake_retry_or_error)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute._build_instance, self.context, {}, {},
+ None, None, None, True, node, instance, {}, False)
+ self.assertFalse(self.admin_context,
+ "_reschedule_or_error called with admin context")
+
+ def test_allocate_network_fails(self):
+ self.flags(network_allocate_retries=0)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+
+ instance = {}
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+ dhcp_options = None
+
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ security_groups=sec_groups,
+ dhcp_options=dhcp_options).AndRaise(test.TestingException())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute._allocate_network_async,
+ self.context, instance, req_networks, macs,
+ sec_groups, is_vpn, dhcp_options)
+
+ def test_allocate_network_neg_conf_value_treated_as_zero(self):
+ self.flags(network_allocate_retries=-1)
+
+ nwapi = self.compute.network_api
+ self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
+
+ instance = {}
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+ dhcp_options = None
+
+ # Only attempted once.
+ nwapi.allocate_for_instance(
+ self.context, instance, vpn=is_vpn,
+ requested_networks=req_networks, macs=macs,
+ security_groups=sec_groups,
+ dhcp_options=dhcp_options).AndRaise(test.TestingException())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.compute._allocate_network_async,
+ self.context, instance, req_networks, macs,
+ sec_groups, is_vpn, dhcp_options)
+
+ @mock.patch.object(network_api.API, 'allocate_for_instance')
+ @mock.patch.object(manager.ComputeManager, '_instance_update')
+ @mock.patch.object(time, 'sleep')
+ def test_allocate_network_with_conf_value_is_one(
+ self, sleep, _instance_update, allocate_for_instance):
+ self.flags(network_allocate_retries=1)
+
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'])
+ is_vpn = 'fake-is-vpn'
+ req_networks = 'fake-req-networks'
+ macs = 'fake-macs'
+ sec_groups = 'fake-sec-groups'
+ dhcp_options = None
+ final_result = 'zhangtralon'
+
+ allocate_for_instance.side_effect = [test.TestingException(),
+ final_result]
+ res = self.compute._allocate_network_async(self.context, instance,
+ req_networks,
+ macs,
+ sec_groups,
+ is_vpn,
+ dhcp_options)
+ self.assertEqual(final_result, res)
+ self.assertEqual(1, sleep.call_count)
+
+ def test_init_host(self):
+ our_host = self.compute.host
+ fake_context = 'fake-context'
+ inst = fake_instance.fake_db_instance(
+ vm_state=vm_states.ACTIVE,
+ info_cache=dict(test_instance_info_cache.fake_info_cache,
+ network_info=None),
+ security_groups=None)
+ startup_instances = [inst, inst, inst]
+
+ def _do_mock_calls(defer_iptables_apply):
+ self.compute.driver.init_host(host=our_host)
+ context.get_admin_context().AndReturn(fake_context)
+ db.instance_get_all_by_host(
+ fake_context, our_host, columns_to_join=['info_cache'],
+ use_slave=False
+ ).AndReturn(startup_instances)
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_on()
+ self.compute._destroy_evacuated_instances(fake_context)
+ self.compute._init_instance(fake_context,
+ mox.IsA(objects.Instance))
+ self.compute._init_instance(fake_context,
+ mox.IsA(objects.Instance))
+ self.compute._init_instance(fake_context,
+ mox.IsA(objects.Instance))
+ if defer_iptables_apply:
+ self.compute.driver.filter_defer_apply_off()
+
+ self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_on')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'filter_defer_apply_off')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.compute,
+ '_destroy_evacuated_instances')
+ self.mox.StubOutWithMock(self.compute,
+ '_init_instance')
+
+ # Test with defer_iptables_apply
+ self.flags(defer_iptables_apply=True)
+ _do_mock_calls(True)
+
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ self.mox.VerifyAll()
+
+ # Test without defer_iptables_apply
+ self.mox.ResetAll()
+ self.flags(defer_iptables_apply=False)
+ _do_mock_calls(False)
+
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ @mock.patch('nova.objects.InstanceList')
+ def test_cleanup_host(self, mock_instance_list):
+        # Verify that cleanup_host(), when invoked, delegates to the
+        # underlying driver's equivalent method.
+
+ mock_instance_list.get_by_host.return_value = []
+
+ with mock.patch.object(self.compute, 'driver') as mock_driver:
+ self.compute.init_host()
+ mock_driver.init_host.assert_called_once_with(host='fake-mini')
+
+ self.compute.cleanup_host()
+ mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')
+
+ def test_init_host_with_deleted_migration(self):
+ our_host = self.compute.host
+ not_our_host = 'not-' + our_host
+ fake_context = 'fake-context'
+
+ deleted_instance = fake_instance.fake_instance_obj(
+ self.context, host=not_our_host, uuid='fake-uuid')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(context, 'get_admin_context')
+ self.mox.StubOutWithMock(self.compute, 'init_virt_events')
+ self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute, '_init_instance')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+
+ self.compute.driver.init_host(host=our_host)
+ context.get_admin_context().AndReturn(fake_context)
+ db.instance_get_all_by_host(fake_context, our_host,
+ columns_to_join=['info_cache'],
+ use_slave=False
+ ).AndReturn([])
+ self.compute.init_virt_events()
+
+ # simulate failed instance
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn([deleted_instance])
+ self.compute._get_instance_nw_info(fake_context, deleted_instance
+ ).AndRaise(exception.InstanceNotFound(
+ instance_id=deleted_instance['uuid']))
+ # ensure driver.destroy is called so that driver may
+ # clean up any dangling files
+ self.compute.driver.destroy(fake_context, deleted_instance,
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.compute.init_host()
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_init_instance_failed_resume_sets_error(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='fake-uuid',
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_attrs=['info_cache'])
+
+ self.flags(resume_guests_state_on_host_boot=True)
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'resume_state_on_host_boot')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_set_instance_error_state')
+ self.compute._get_power_state(mox.IgnoreArg(),
+ instance).AndReturn(power_state.SHUTDOWN)
+ self.compute._get_power_state(mox.IgnoreArg(),
+ instance).AndReturn(power_state.SHUTDOWN)
+ self.compute._get_power_state(mox.IgnoreArg(),
+ instance).AndReturn(power_state.SHUTDOWN)
+ self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
+ self.compute._get_instance_block_device_info(mox.IgnoreArg(),
+ instance).AndReturn('fake-bdm')
+ self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
+ instance, mox.IgnoreArg(),
+ 'fake-bdm').AndRaise(test.TestingException)
+ self.compute._set_instance_error_state(mox.IgnoreArg(), instance)
+ self.mox.ReplayAll()
+ self.compute._init_instance('fake-context', instance)
+
+ def test_init_instance_stuck_in_deleting(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='fake-uuid',
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.DELETING)
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ self.mox.StubOutWithMock(instance, 'obj_load_attr')
+
+ bdms = []
+ instance.obj_load_attr('metadata')
+ instance.obj_load_attr('system_metadata')
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, instance.uuid).AndReturn(bdms)
+ self.compute._delete_instance(self.context, instance, bdms,
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.compute._init_instance(self.context, instance)
+
+ def _test_init_instance_reverts_crashed_migrations(self,
+ old_vm_state=None):
+        power_on = (not old_vm_state or old_vm_state == vm_states.ACTIVE)
+ sys_meta = {
+ 'old_vm_state': old_vm_state
+ }
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='foo',
+ vm_state=vm_states.ERROR,
+ task_state=task_states.RESIZE_MIGRATING,
+ power_state=power_state.SHUTDOWN,
+ system_metadata=sys_meta,
+ expected_attrs=['system_metadata'])
+
+ self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
+ self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'finish_revert_migration')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_info')
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute, '_retry_reboot')
+
+ self.compute._retry_reboot(self.context, instance).AndReturn(
+ (False, None))
+ compute_utils.get_nw_info_for_instance(instance).AndReturn(
+ network_model.NetworkInfo())
+ self.compute.driver.plug_vifs(instance, [])
+ self.compute._get_instance_block_device_info(
+ self.context, instance).AndReturn([])
+ self.compute.driver.finish_revert_migration(self.context, instance,
+ [], [], power_on)
+ instance.save()
+ self.compute.driver.get_info(instance).AndReturn(
+ {'state': power_state.SHUTDOWN})
+ self.compute.driver.get_info(instance).AndReturn(
+ {'state': power_state.SHUTDOWN})
+
+ self.mox.ReplayAll()
+
+ self.compute._init_instance(self.context, instance)
+ self.assertIsNone(instance.task_state)
+
+ def test_init_instance_reverts_crashed_migration_from_active(self):
+ self._test_init_instance_reverts_crashed_migrations(
+ old_vm_state=vm_states.ACTIVE)
+
+ def test_init_instance_reverts_crashed_migration_from_stopped(self):
+ self._test_init_instance_reverts_crashed_migrations(
+ old_vm_state=vm_states.STOPPED)
+
+ def test_init_instance_reverts_crashed_migration_no_old_state(self):
+ self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
+
+ def test_init_instance_resets_crashed_live_migration(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='foo',
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.MIGRATING)
+ with contextlib.nested(
+ mock.patch.object(instance, 'save'),
+ mock.patch('nova.compute.utils.get_nw_info_for_instance',
+ return_value=network_model.NetworkInfo())
+ ) as (save, get_nw_info):
+ self.compute._init_instance(self.context, instance)
+ save.assert_called_once_with(expected_task_state=['migrating'])
+ get_nw_info.assert_called_once_with(instance)
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+ def _test_init_instance_sets_building_error(self, vm_state,
+ task_state=None):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='foo',
+ vm_state=vm_state,
+ task_state=task_state)
+ with mock.patch.object(instance, 'save') as save:
+ self.compute._init_instance(self.context, instance)
+ save.assert_called_once_with()
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ def test_init_instance_sets_building_error(self):
+ self._test_init_instance_sets_building_error(vm_states.BUILDING)
+
+ def test_init_instance_sets_rebuilding_errors(self):
+ tasks = [task_states.REBUILDING,
+ task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ task_states.REBUILD_SPAWNING]
+ vms = [vm_states.ACTIVE, vm_states.STOPPED]
+
+ for vm_state in vms:
+ for task_state in tasks:
+ self._test_init_instance_sets_building_error(
+ vm_state, task_state)
+
+ def _test_init_instance_sets_building_tasks_error(self, instance):
+ with mock.patch.object(instance, 'save') as save:
+ self.compute._init_instance(self.context, instance)
+ save.assert_called_once_with()
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ def test_init_instance_sets_building_tasks_error_scheduling(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='foo',
+ vm_state=None,
+ task_state=task_states.SCHEDULING)
+ self._test_init_instance_sets_building_tasks_error(instance)
+
+ def test_init_instance_sets_building_tasks_error_block_device(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = None
+ instance.task_state = task_states.BLOCK_DEVICE_MAPPING
+ self._test_init_instance_sets_building_tasks_error(instance)
+
+ def test_init_instance_sets_building_tasks_error_networking(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = None
+ instance.task_state = task_states.NETWORKING
+ self._test_init_instance_sets_building_tasks_error(instance)
+
+ def test_init_instance_sets_building_tasks_error_spawning(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = None
+ instance.task_state = task_states.SPAWNING
+ self._test_init_instance_sets_building_tasks_error(instance)
+
+ def _test_init_instance_cleans_image_states(self, instance):
+ with mock.patch.object(instance, 'save') as save:
+ self.compute._get_power_state = mock.Mock()
+ self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
+ instance.info_cache = None
+ instance.power_state = power_state.RUNNING
+ self.compute._init_instance(self.context, instance)
+ save.assert_called_once_with()
+ self.compute.driver.post_interrupted_snapshot_cleanup.\
+ assert_called_once_with(self.context, instance)
+ self.assertIsNone(instance.task_state)
+
+ def test_init_instance_cleans_image_state_pending_upload(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_PENDING_UPLOAD
+ self._test_init_instance_cleans_image_states(instance)
+
+ def test_init_instance_cleans_image_state_uploading(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_UPLOADING
+ self._test_init_instance_cleans_image_states(instance)
+
+ def test_init_instance_cleans_image_state_snapshot(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_SNAPSHOT
+ self._test_init_instance_cleans_image_states(instance)
+
+ def test_init_instance_cleans_image_state_snapshot_pending(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
+ self._test_init_instance_cleans_image_states(instance)
+
+ def test_init_instance_errors_when_not_migrating(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ERROR
+ instance.task_state = task_states.IMAGE_UPLOADING
+ self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
+ self.mox.ReplayAll()
+ self.compute._init_instance(self.context, instance)
+ self.mox.VerifyAll()
+
+ def test_init_instance_deletes_error_deleting_instance(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='fake',
+ vm_state=vm_states.ERROR,
+ task_state=task_states.DELETING)
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ self.mox.StubOutWithMock(instance, 'obj_load_attr')
+
+ bdms = []
+ instance.obj_load_attr('metadata')
+ instance.obj_load_attr('system_metadata')
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, instance.uuid).AndReturn(bdms)
+ self.compute._delete_instance(self.context, instance, bdms,
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.compute._init_instance(self.context, instance)
+ self.mox.VerifyAll()
+
+ @mock.patch('nova.context.RequestContext.elevated')
+ @mock.patch('nova.compute.utils.get_nw_info_for_instance')
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager._get_instance_block_device_info')
+ @mock.patch('nova.virt.driver.ComputeDriver.destroy')
+ @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
+ def test_shutdown_instance_endpoint_not_found(self, mock_connector,
+ mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
+ mock_connector.side_effect = cinder_exception.EndpointNotFound
+ mock_elevated.return_value = self.context
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='fake',
+ vm_state=vm_states.ERROR,
+ task_state=task_states.DELETING)
+ bdms = [mock.Mock(id=1, is_volume=True)]
+
+ self.compute._shutdown_instance(self.context, instance, bdms,
+ notify=False, try_deallocate_networks=False)
+
+ def _test_init_instance_retries_reboot(self, instance, reboot_type,
+ return_power_state):
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_get_power_state',
+ return_value=return_power_state),
+ mock.patch.object(self.compute.compute_rpcapi, 'reboot_instance'),
+ mock.patch.object(compute_utils, 'get_nw_info_for_instance')
+ ) as (
+ _get_power_state,
+ reboot_instance,
+ get_nw_info_for_instance
+ ):
+ self.compute._init_instance(self.context, instance)
+ call = mock.call(self.context, instance, block_device_info=None,
+ reboot_type=reboot_type)
+ reboot_instance.assert_has_calls([call])
+
+ def test_init_instance_retries_reboot_pending(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING
+ for state in vm_states.ALLOW_SOFT_REBOOT:
+ instance.vm_state = state
+ self._test_init_instance_retries_reboot(instance, 'SOFT',
+ power_state.RUNNING)
+
+ def test_init_instance_retries_reboot_pending_hard(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING_HARD
+ for state in vm_states.ALLOW_HARD_REBOOT:
+            # NOTE(dave-mcnally) While a reboot of a vm in error state is
+            # possible, we don't attempt to recover from the error during
+            # init.
+ if state == vm_states.ERROR:
+ continue
+ instance.vm_state = state
+ self._test_init_instance_retries_reboot(instance, 'HARD',
+ power_state.RUNNING)
+
+ def test_init_instance_retries_reboot_started(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.REBOOT_STARTED
+ self._test_init_instance_retries_reboot(instance, 'HARD',
+ power_state.NOSTATE)
+
+ def test_init_instance_retries_reboot_started_hard(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ self._test_init_instance_retries_reboot(instance, 'HARD',
+ power_state.NOSTATE)
+
+ def _test_init_instance_cleans_reboot_state(self, instance):
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING),
+ mock.patch.object(instance, 'save', autospec=True),
+ mock.patch.object(compute_utils, 'get_nw_info_for_instance')
+ ) as (
+ _get_power_state,
+ instance_save,
+ get_nw_info_for_instance
+ ):
+ self.compute._init_instance(self.context, instance)
+ instance_save.assert_called_once_with()
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+ def test_init_instance_cleans_image_state_reboot_started(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.REBOOT_STARTED
+ instance.power_state = power_state.RUNNING
+ self._test_init_instance_cleans_reboot_state(instance)
+
+ def test_init_instance_cleans_image_state_reboot_started_hard(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ instance.power_state = power_state.RUNNING
+ self._test_init_instance_cleans_reboot_state(instance)
+
+ def test_init_instance_retries_power_off(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.id = 1
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.POWERING_OFF
+ with mock.patch.object(self.compute, 'stop_instance'):
+ self.compute._init_instance(self.context, instance)
+ call = mock.call(self.context, instance)
+ self.compute.stop_instance.assert_has_calls([call])
+
+ def test_init_instance_retries_power_on(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.id = 1
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.POWERING_ON
+ with mock.patch.object(self.compute, 'start_instance'):
+ self.compute._init_instance(self.context, instance)
+ call = mock.call(self.context, instance)
+ self.compute.start_instance.assert_has_calls([call])
+
+ def test_init_instance_retries_power_on_silent_exception(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.id = 1
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.POWERING_ON
+ with mock.patch.object(self.compute, 'start_instance',
+ return_value=Exception):
+ init_return = self.compute._init_instance(self.context, instance)
+ call = mock.call(self.context, instance)
+ self.compute.start_instance.assert_has_calls([call])
+ self.assertIsNone(init_return)
+
+ def test_init_instance_retries_power_off_silent_exception(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.id = 1
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = task_states.POWERING_OFF
+ with mock.patch.object(self.compute, 'stop_instance',
+ return_value=Exception):
+ init_return = self.compute._init_instance(self.context, instance)
+ call = mock.call(self.context, instance)
+ self.compute.stop_instance.assert_has_calls([call])
+ self.assertIsNone(init_return)
+
+ def test_get_instances_on_driver(self):
+ fake_context = context.get_admin_context()
+
+ driver_instances = []
+ for x in xrange(10):
+ driver_instances.append(fake_instance.fake_db_instance())
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+
+ self.compute.driver.list_instance_uuids().AndReturn(
+ [inst['uuid'] for inst in driver_instances])
+ db.instance_get_all_by_filters(
+ fake_context,
+ {'uuid': [inst['uuid'] for
+ inst in driver_instances]},
+ 'created_at', 'desc', columns_to_join=None,
+ limit=None, marker=None,
+ use_slave=True).AndReturn(
+ driver_instances)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instances_on_driver(fake_context)
+ self.assertEqual([x['uuid'] for x in driver_instances],
+ [x['uuid'] for x in result])
+
+ def test_get_instances_on_driver_fallback(self):
+ # Test getting instances when driver doesn't support
+ # 'list_instance_uuids'
+ self.compute.host = 'host'
+ filters = {'host': self.compute.host}
+ fake_context = context.get_admin_context()
+
+ self.flags(instance_name_template='inst-%i')
+
+ all_instances = []
+ driver_instances = []
+ for x in xrange(10):
+ instance = fake_instance.fake_db_instance(name='inst-%i' % x,
+ id=x)
+ if x % 2:
+ driver_instances.append(instance)
+ all_instances.append(instance)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instance_uuids')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'list_instances')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+
+ self.compute.driver.list_instance_uuids().AndRaise(
+ NotImplementedError())
+ self.compute.driver.list_instances().AndReturn(
+ [inst['name'] for inst in driver_instances])
+ db.instance_get_all_by_filters(
+ fake_context, filters,
+ 'created_at', 'desc', columns_to_join=None,
+ limit=None, marker=None,
+ use_slave=True).AndReturn(all_instances)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instances_on_driver(fake_context, filters)
+ self.assertEqual([x['uuid'] for x in driver_instances],
+ [x['uuid'] for x in result])
+
+ def test_instance_usage_audit(self):
+ instances = [objects.Instance(uuid='foo')]
+
+ @classmethod
+ def fake_get(*a, **k):
+ return instances
+
+ self.flags(instance_usage_audit=True)
+ self.stubs.Set(compute_utils, 'has_audit_been_run',
+ lambda *a, **k: False)
+ self.stubs.Set(objects.InstanceList,
+ 'get_active_by_window_joined', fake_get)
+ self.stubs.Set(compute_utils, 'start_instance_usage_audit',
+ lambda *a, **k: None)
+ self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
+ lambda *a, **k: None)
+
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'notify_usage_exists')
+ self.compute.conductor_api.notify_usage_exists(
+ self.context, instances[0], ignore_missing_network_data=False)
+ self.mox.ReplayAll()
+ self.compute._instance_usage_audit(self.context)
+
+ def _get_sync_instance(self, power_state, vm_state, task_state=None,
+ shutdown_terminate=False):
+ instance = objects.Instance()
+ instance.uuid = 'fake-uuid'
+ instance.power_state = power_state
+ instance.vm_state = vm_state
+ instance.host = self.compute.host
+ instance.task_state = task_state
+ instance.shutdown_terminate = shutdown_terminate
+ self.mox.StubOutWithMock(instance, 'refresh')
+ self.mox.StubOutWithMock(instance, 'save')
+ return instance
+
+ def test_sync_instance_power_state_match(self):
+ instance = self._get_sync_instance(power_state.RUNNING,
+ vm_states.ACTIVE)
+ instance.refresh(use_slave=False)
+ self.mox.ReplayAll()
+ self.compute._sync_instance_power_state(self.context, instance,
+ power_state.RUNNING)
+
+ def test_sync_instance_power_state_running_stopped(self):
+ instance = self._get_sync_instance(power_state.RUNNING,
+ vm_states.ACTIVE)
+ instance.refresh(use_slave=False)
+ instance.save()
+ self.mox.ReplayAll()
+ self.compute._sync_instance_power_state(self.context, instance,
+ power_state.SHUTDOWN)
+ self.assertEqual(instance.power_state, power_state.SHUTDOWN)
+
+ def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
+ stop=True, force=False, shutdown_terminate=False):
+ instance = self._get_sync_instance(
+ power_state, vm_state, shutdown_terminate=shutdown_terminate)
+ instance.refresh(use_slave=False)
+ instance.save()
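+        # Which compute API call is expected depends on the flags: delete()
+        # when shutdown_terminate is set, otherwise stop() or force_stop()
+        # depending on 'force'; with stop=False no call is expected at all.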
+ self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
+ self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
+ self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
+ if shutdown_terminate:
+ self.compute.compute_api.delete(self.context, instance)
+ elif stop:
+ if force:
+ self.compute.compute_api.force_stop(self.context, instance)
+ else:
+ self.compute.compute_api.stop(self.context, instance)
+ self.mox.ReplayAll()
+ self.compute._sync_instance_power_state(self.context, instance,
+ driver_power_state)
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_sync_instance_power_state_to_stop(self):
+ for ps in (power_state.SHUTDOWN, power_state.CRASHED,
+ power_state.SUSPENDED):
+ self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
+
+ for ps in (power_state.SHUTDOWN, power_state.CRASHED):
+ self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
+ force=True)
+
+ self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
+ power_state.RUNNING, force=True)
+
+ def test_sync_instance_power_state_to_terminate(self):
+ self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
+ power_state.SHUTDOWN,
+ force=False, shutdown_terminate=True)
+
+ def test_sync_instance_power_state_to_no_stop(self):
+ for ps in (power_state.PAUSED, power_state.NOSTATE):
+ self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
+ stop=False)
+ for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
+ for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
+ self._test_sync_to_stop(power_state.RUNNING, vs, ps,
+ stop=False)
+
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_sync_instance_power_state')
+ def test_query_driver_power_state_and_sync_pending_task(
+ self, mock_sync_power_state):
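+ # A pending task_state (POWERING_OFF here) short-circuits the sync:
+ # the driver is never queried and no power-state sync is performed.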
+ with mock.patch.object(self.compute.driver,
+ 'get_info') as mock_get_info:
+ db_instance = objects.Instance(uuid='fake-uuid',
+ task_state=task_states.POWERING_OFF)
+ self.compute._query_driver_power_state_and_sync(self.context,
+ db_instance)
+ self.assertFalse(mock_get_info.called)
+ self.assertFalse(mock_sync_power_state.called)
+
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_sync_instance_power_state')
+ def test_query_driver_power_state_and_sync_not_found_driver(
+ self, mock_sync_power_state):
+ error = exception.InstanceNotFound(instance_id=1)
+ with mock.patch.object(self.compute.driver,
+ 'get_info', side_effect=error) as mock_get_info:
+ db_instance = objects.Instance(uuid='fake-uuid', task_state=None)
+ self.compute._query_driver_power_state_and_sync(self.context,
+ db_instance)
+ mock_get_info.assert_called_once_with(db_instance)
+ mock_sync_power_state.assert_called_once_with(self.context,
+ db_instance,
+ power_state.NOSTATE,
+ use_slave=True)
+
+ def test_run_pending_deletes(self):
+ self.flags(instance_delete_interval=10)
+
+ class FakeInstance(object):
+ def __init__(self, uuid, name, smd):
+ self.uuid = uuid
+ self.name = name
+ self.system_metadata = smd
+ self.cleaned = False
+
+ def __getitem__(self, name):
+ return getattr(self, name)
+
+ def save(self, context):
+ pass
+
+ class FakeInstanceList(object):
+ def get_by_filters(self, *args, **kwargs):
+ return []
+
+ a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
+ b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
+ c = FakeInstance('789', 'banana', {})
+
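+ # Only two delete_instance_files calls are expected below: 'a' (100
+ # prior clean attempts) is skipped, 'b' is cleaned successfully, and
+ # 'c' fails cleanup, so only 'b' ends up marked as cleaned.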
+ self.mox.StubOutWithMock(objects.InstanceList,
+ 'get_by_filters')
+ objects.InstanceList.get_by_filters(
+ {'read_deleted': 'yes'},
+ {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
+ 'cleaned': False},
+ expected_attrs=['info_cache', 'security_groups',
+ 'system_metadata'],
+ use_slave=True).AndReturn([a, b, c])
+
+ self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
+ self.compute.driver.delete_instance_files(
+ mox.IgnoreArg()).AndReturn(True)
+ self.compute.driver.delete_instance_files(
+ mox.IgnoreArg()).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ self.compute._run_pending_deletes({})
+ self.assertFalse(a.cleaned)
+ self.assertEqual('100', a.system_metadata['clean_attempts'])
+ self.assertTrue(b.cleaned)
+ self.assertEqual('4', b.system_metadata['clean_attempts'])
+ self.assertFalse(c.cleaned)
+ self.assertEqual('1', c.system_metadata['clean_attempts'])
+
+ def test_attach_interface_failure(self):
+ # Test that the fault methods are invoked when an attach fails
+ db_instance = fake_instance.fake_db_instance()
+ f_instance = objects.Instance._from_db_object(self.context,
+ objects.Instance(),
+ db_instance)
+ e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(self.compute.network_api,
+ 'allocate_port_for_instance',
+ side_effect=e)
+ def do_test(meth, add_fault):
+ self.assertRaises(exception.InterfaceAttachFailed,
+ self.compute.attach_interface,
+ self.context, f_instance, 'net_id', 'port_id',
+ None)
+ add_fault.assert_has_calls([
+ mock.call(self.context, f_instance, e,
+ mock.ANY)])
+
+ do_test()
+
+ def test_detach_interface_failure(self):
+ # Test that the fault methods are invoked when a detach fails
+
+ # Build test data that will cause a PortNotFound exception
+ f_instance = mock.MagicMock()
+ f_instance.info_cache = mock.MagicMock()
+ f_instance.info_cache.network_info = []
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(self.compute, '_set_instance_error_state')
+ def do_test(meth, add_fault):
+ self.assertRaises(exception.PortNotFound,
+ self.compute.detach_interface,
+ self.context, f_instance, 'port_id')
+ add_fault.assert_has_calls([
+ mock.call(self.context, f_instance, mock.ANY, mock.ANY)])
+
+ do_test()
+
+ def test_swap_volume_volume_api_usage(self):
+ # This test ensures that volume_id arguments are passed through to the
+ # volume_api and that the volume states end up as expected
+ volumes = {}
+ old_volume_id = uuidutils.generate_uuid()
+ volumes[old_volume_id] = {'id': old_volume_id,
+ 'display_name': 'old_volume',
+ 'status': 'detaching',
+ 'size': 1}
+ new_volume_id = uuidutils.generate_uuid()
+ volumes[new_volume_id] = {'id': new_volume_id,
+ 'display_name': 'new_volume',
+ 'status': 'available',
+ 'size': 2}
+
+ def fake_vol_api_roll_detaching(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'detaching':
+ volumes[volume_id]['status'] = 'in-use'
+
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'device_name': '/dev/vdb', 'source_type': 'volume',
+ 'destination_type': 'volume', 'instance_uuid': 'fake',
+ 'connection_info': '{"foo": "bar"}'})
+
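+ # fake_vol_api_func stands in for both initialize_connection and
+ # terminate_connection below.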
+ def fake_vol_api_func(context, volume, *args):
+ self.assertTrue(uuidutils.is_uuid_like(volume))
+ return {}
+
+ def fake_vol_get(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ return volumes[volume_id]
+
+ def fake_vol_unreserve(context, volume_id):
+ self.assertTrue(uuidutils.is_uuid_like(volume_id))
+ if volumes[volume_id]['status'] == 'attaching':
+ volumes[volume_id]['status'] = 'available'
+
+ def fake_vol_migrate_volume_completion(context, old_volume_id,
+ new_volume_id, error=False):
+ self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
+ self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
+ volumes[old_volume_id]['status'] = 'in-use'
+ return {'save_volume_id': new_volume_id}
+
+ def fake_func_exc(*args, **kwargs):
+ raise AttributeError  # Arbitrary exception to exercise the error paths
+
+ def fake_swap_volume(old_connection_info, new_connection_info,
+ instance, mountpoint, resize_to):
+ self.assertEqual(resize_to, 2)
+
+ self.stubs.Set(self.compute.volume_api, 'roll_detaching',
+ fake_vol_api_roll_detaching)
+ self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
+ self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ fake_vol_api_func)
+ self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
+ fake_vol_unreserve)
+ self.stubs.Set(self.compute.volume_api, 'terminate_connection',
+ fake_vol_api_func)
+ self.stubs.Set(db, 'block_device_mapping_get_by_volume_id',
+ lambda x, y, z: fake_bdm)
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ lambda x: {})
+ self.stubs.Set(self.compute.driver, 'swap_volume',
+ fake_swap_volume)
+ self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
+ fake_vol_migrate_volume_completion)
+ self.stubs.Set(db, 'block_device_mapping_update',
+ lambda *a, **k: fake_bdm)
+ self.stubs.Set(db,
+ 'instance_fault_create',
+ lambda x, y:
+ test_instance_fault.fake_faults['fake-uuid'][0])
+
+ # Good path
+ self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
+ fake_instance.fake_instance_obj(
+ self.context, **{'uuid': 'fake'}))
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+
+ # Error paths
+ volumes[old_volume_id]['status'] = 'detaching'
+ volumes[new_volume_id]['status'] = 'attaching'
+ self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
+ self.assertRaises(AttributeError, self.compute.swap_volume,
+ self.context, old_volume_id, new_volume_id,
+ fake_instance.fake_instance_obj(
+ self.context, **{'uuid': 'fake'}))
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+
+ volumes[old_volume_id]['status'] = 'detaching'
+ volumes[new_volume_id]['status'] = 'attaching'
+ self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ fake_func_exc)
+ self.assertRaises(AttributeError, self.compute.swap_volume,
+ self.context, old_volume_id, new_volume_id,
+ fake_instance.fake_instance_obj(
+ self.context, **{'uuid': 'fake'}))
+ self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+ self.assertEqual(volumes[new_volume_id]['status'], 'available')
+
+ def test_check_can_live_migrate_source(self):
+ is_volume_backed = 'volume_backed'
+ dest_check_data = dict(foo='bar')
+ db_instance = fake_instance.fake_db_instance()
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_instance)
+ expected_dest_check_data = dict(dest_check_data,
+ is_volume_backed=is_volume_backed)
+
+ self.mox.StubOutWithMock(self.compute.compute_api,
+ 'is_volume_backed_instance')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_can_live_migrate_source')
+
+ self.compute.compute_api.is_volume_backed_instance(
+ self.context, instance).AndReturn(is_volume_backed)
+ self.compute._get_instance_block_device_info(
+ self.context, instance, refresh_conn_info=True
+ ).AndReturn({'block_device_mapping': 'fake'})
+ self.compute.driver.check_can_live_migrate_source(
+ self.context, instance, expected_dest_check_data,
+ {'block_device_mapping': 'fake'})
+
+ self.mox.ReplayAll()
+
+ self.compute.check_can_live_migrate_source(
+ self.context, instance=instance,
+ dest_check_data=dest_check_data)
+
+ def _test_check_can_live_migrate_destination(self, do_raise=False,
+ has_mig_data=False):
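+ # The destination host gathers compute info for both hosts, asks the
+ # driver to validate the migration, then calls back to the source via
+ # RPC; do_raise simulates that source-side check failing.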
+ db_instance = fake_instance.fake_db_instance(host='fake-host')
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_instance)
+ instance.host = 'fake-host'
+ block_migration = 'block_migration'
+ disk_over_commit = 'disk_over_commit'
+ src_info = 'src_info'
+ dest_info = 'dest_info'
+ dest_check_data = dict(foo='bar')
+ mig_data = dict(cow='moo')
+ expected_result = dict(mig_data)
+ if has_mig_data:
+ dest_check_data['migrate_data'] = dict(cat='meow')
+ expected_result.update(cat='meow')
+
+ self.mox.StubOutWithMock(self.compute, '_get_compute_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_can_live_migrate_destination')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'check_can_live_migrate_source')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_can_live_migrate_destination_cleanup')
+
+ self.compute._get_compute_info(self.context,
+ 'fake-host').AndReturn(src_info)
+ self.compute._get_compute_info(self.context,
+ CONF.host).AndReturn(dest_info)
+ self.compute.driver.check_can_live_migrate_destination(
+ self.context, instance, src_info, dest_info,
+ block_migration, disk_over_commit).AndReturn(dest_check_data)
+
+ mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
+ self.context, instance, dest_check_data)
+ if do_raise:
+ mock_meth.AndRaise(test.TestingException())
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ db.instance_fault_create(
+ self.context, mox.IgnoreArg()).AndReturn(
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ else:
+ mock_meth.AndReturn(mig_data)
+ self.compute.driver.check_can_live_migrate_destination_cleanup(
+ self.context, dest_check_data)
+
+ self.mox.ReplayAll()
+
+ result = self.compute.check_can_live_migrate_destination(
+ self.context, instance=instance,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ self.assertEqual(expected_result, result)
+
+ def test_check_can_live_migrate_destination_success(self):
+ self._test_check_can_live_migrate_destination()
+
+ def test_check_can_live_migrate_destination_success_w_mig_data(self):
+ self._test_check_can_live_migrate_destination(has_mig_data=True)
+
+ def test_check_can_live_migrate_destination_fail(self):
+ self.assertRaises(
+ test.TestingException,
+ self._test_check_can_live_migrate_destination,
+ do_raise=True)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_prepare_for_instance_event(self, lock_name_mock):
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ self.assertIn('foo', self.compute.instance_events._events)
+ self.assertIn('test-event',
+ self.compute.instance_events._events['foo'])
+ self.assertEqual(
+ result,
+ self.compute.instance_events._events['foo']['test-event'])
+ self.assertTrue(hasattr(result, 'send'))
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_pop_instance_event(self, lock_name_mock):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ event_obj = objects.InstanceExternalEvent(name='test-event',
+ tag=None)
+ result = self.compute.instance_events.pop_instance_event(inst_obj,
+ event_obj)
+ self.assertEqual(result, event)
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_clear_events_for_instance(self, lock_name_mock):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events.clear_events_for_instance(
+ inst_obj)
+ self.assertEqual(result, {'test-event': event})
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ def test_instance_events_lock_name(self):
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events._lock_name(inst_obj)
+ self.assertEqual(result, 'foo-events')
+
+ def test_prepare_for_instance_event_again(self):
+ inst_obj = objects.Instance(uuid='foo')
+ self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ # A second attempt will avoid creating a new list; make sure we
+ # get the current list
+ result = self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ self.assertIn('foo', self.compute.instance_events._events)
+ self.assertIn('test-event',
+ self.compute.instance_events._events['foo'])
+ self.assertEqual(
+ result,
+ self.compute.instance_events._events['foo']['test-event'])
+ self.assertTrue(hasattr(result, 'send'))
+
+ def test_process_instance_event(self):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ event_obj = objects.InstanceExternalEvent(name='test-event', tag=None)
+ self.compute._process_instance_event(inst_obj, event_obj)
+ self.assertTrue(event.ready())
+ self.assertEqual(event_obj, event.wait())
+ self.assertEqual({}, self.compute.instance_events._events)
+
+ def test_external_instance_event(self):
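+ # 'network-changed' events are expected to trigger a network info
+ # refresh for the matching instance, while any other event is handed
+ # to _process_instance_event.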
+ instances = [
+ objects.Instance(id=1, uuid='uuid1'),
+ objects.Instance(id=2, uuid='uuid2')]
+ events = [
+ objects.InstanceExternalEvent(name='network-changed',
+ tag='tag1',
+ instance_uuid='uuid1'),
+ objects.InstanceExternalEvent(name='foo', instance_uuid='uuid2',
+ tag='tag2')]
+
+ @mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
+ @mock.patch.object(self.compute, '_process_instance_event')
+ def do_test(_process_instance_event, get_instance_nw_info):
+ self.compute.external_instance_event(self.context,
+ instances, events)
+ get_instance_nw_info.assert_called_once_with(self.context,
+ instances[0])
+ _process_instance_event.assert_called_once_with(instances[1],
+ events[1])
+ do_test()
+
+ def test_retry_reboot_pending_soft(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING
+ instance.vm_state = vm_states.ACTIVE
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'SOFT')
+
+ def test_retry_reboot_pending_hard(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING_HARD
+ instance.vm_state = vm_states.ACTIVE
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_soft_off(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.NOSTATE):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_hard_off(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.NOSTATE):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_hard_on(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertFalse(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_no_reboot(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = 'bar'
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ self.context, instance)
+ self.assertFalse(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
+ @mock.patch('nova.compute.manager.ComputeManager._detach_volume')
+ @mock.patch('nova.objects.Instance._from_db_object')
+ def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
+ bdm = mock.sentinel.bdm
+ inst_obj = mock.sentinel.inst_obj
+ bdm_get.return_value = bdm
+ inst_from_db.return_value = inst_obj
+ with mock.patch.object(self.compute, 'volume_api'):
+ self.compute.remove_volume_connection(self.context, 'vol',
+ inst_obj)
+ detach.assert_called_once_with(self.context, inst_obj, bdm)
+
+ def _test_rescue(self, clean_shutdown=True):
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE)
+ fake_nw_info = network_model.NetworkInfo()
+ rescue_image_meta = {'id': 'fake', 'name': 'fake'}
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+ mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure'),
+ mock.patch.object(self.context, 'elevated',
+ return_value=self.context),
+ mock.patch.object(self.compute, '_get_instance_nw_info',
+ return_value=fake_nw_info),
+ mock.patch.object(self.compute, '_get_rescue_image',
+ return_value=rescue_image_meta),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute, '_power_off_instance'),
+ mock.patch.object(self.compute.driver, 'rescue'),
+ mock.patch.object(self.compute.conductor_api,
+ 'notify_usage_exists'),
+ mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING),
+ mock.patch.object(instance, 'save')
+ ) as (
+ event_start, event_finish, elevated_context, get_nw_info,
+ get_rescue_image, notify_instance_usage, power_off_instance,
+ driver_rescue, notify_usage_exists, get_power_state, instance_save
+ ):
+ self.compute.rescue_instance(
+ self.context, instance, rescue_password='verybadpass',
+ rescue_image_ref=None, clean_shutdown=clean_shutdown)
+
+ # assert the field values on the instance object
+ self.assertEqual(vm_states.RESCUED, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(power_state.RUNNING, instance.power_state)
+ self.assertIsNotNone(instance.launched_at)
+
+ # assert our mock calls
+ get_nw_info.assert_called_once_with(self.context, instance)
+ get_rescue_image.assert_called_once_with(
+ self.context, instance, None)
+
+ extra_usage_info = {'rescue_image_name': 'fake'}
+ notify_calls = [
+ mock.call(self.context, instance, "rescue.start",
+ extra_usage_info=extra_usage_info,
+ network_info=fake_nw_info),
+ mock.call(self.context, instance, "rescue.end",
+ extra_usage_info=extra_usage_info,
+ network_info=fake_nw_info)
+ ]
+ notify_instance_usage.assert_has_calls(notify_calls)
+
+ power_off_instance.assert_called_once_with(self.context, instance,
+ clean_shutdown)
+
+ driver_rescue.assert_called_once_with(
+ self.context, instance, fake_nw_info, rescue_image_meta,
+ 'verybadpass')
+
+ notify_usage_exists.assert_called_once_with(
+ self.context, instance, current_period=True)
+
+ instance_save.assert_called_once_with(
+ expected_task_state=task_states.RESCUING)
+
+ def test_rescue(self):
+ self._test_rescue()
+
+ def test_rescue_forced_shutdown(self):
+ self._test_rescue(clean_shutdown=False)
+
+ def test_unrescue(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.RESCUED)
+ fake_nw_info = network_model.NetworkInfo()
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+ mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure'),
+ mock.patch.object(self.context, 'elevated',
+ return_value=self.context),
+ mock.patch.object(self.compute, '_get_instance_nw_info',
+ return_value=fake_nw_info),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute.driver, 'unrescue'),
+ mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING),
+ mock.patch.object(instance, 'save')
+ ) as (
+ event_start, event_finish, elevated_context, get_nw_info,
+ notify_instance_usage, driver_unrescue, get_power_state,
+ instance_save
+ ):
+ self.compute.unrescue_instance(self.context, instance)
+
+ # assert the field values on the instance object
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(power_state.RUNNING, instance.power_state)
+
+ # assert our mock calls
+ get_nw_info.assert_called_once_with(self.context, instance)
+
+ notify_calls = [
+ mock.call(self.context, instance, "unrescue.start",
+ network_info=fake_nw_info),
+ mock.call(self.context, instance, "unrescue.end",
+ network_info=fake_nw_info)
+ ]
+ notify_instance_usage.assert_has_calls(notify_calls)
+
+ driver_unrescue.assert_called_once_with(instance, fake_nw_info)
+
+ instance_save.assert_called_once_with(
+ expected_task_state=task_states.UNRESCUING)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
+ return_value=power_state.RUNNING)
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch('nova.utils.generate_password', return_value='fake-pass')
+ def test_set_admin_password(self, gen_password_mock,
+ instance_save_mock, power_state_mock,
+ event_finish_mock, event_start_mock):
+ # Ensure instance can have its admin password set.
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.UPDATING_PASSWORD)
+
+ @mock.patch.object(self.context, 'elevated', return_value=self.context)
+ @mock.patch.object(self.compute.driver, 'set_admin_password')
+ def do_test(driver_mock, elevated_mock):
+ # call the manager method
+ self.compute.set_admin_password(self.context, instance, None)
+ # make our assertions
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+
+ power_state_mock.assert_called_once_with(self.context, instance)
+ driver_mock.assert_called_once_with(instance, 'fake-pass')
+ instance_save_mock.assert_called_once_with(
+ expected_task_state=task_states.UPDATING_PASSWORD)
+
+ do_test()
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
+ return_value=power_state.NOSTATE)
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def test_set_admin_password_bad_state(self, add_fault_mock,
+ instance_save_mock, power_state_mock,
+ event_finish_mock, event_start_mock):
+ # Test that setting the password fails when the instance is not running.
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self.context, 'elevated',
+ return_value=self.context):
+ # call the manager method
+ self.assertRaises(exception.InstancePasswordSetFailed,
+ self.compute.set_admin_password,
+ self.context, instance, None)
+
+ # make our assertions
+ power_state_mock.assert_called_once_with(self.context, instance)
+ instance_save_mock.assert_called_once_with(
+ expected_task_state=task_states.UPDATING_PASSWORD)
+ add_fault_mock.assert_called_once_with(
+ self.context, instance, mock.ANY, mock.ANY)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ @mock.patch('nova.utils.generate_password', return_value='fake-pass')
+ @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
+ return_value=power_state.RUNNING)
+ @mock.patch('nova.compute.manager.ComputeManager._instance_update')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def _do_test_set_admin_password_driver_error(self, exc,
+ expected_vm_state,
+ expected_task_state,
+ expected_exception,
+ add_fault_mock,
+ instance_save_mock,
+ update_mock,
+ power_state_mock,
+ gen_password_mock,
+ event_finish_mock,
+ event_start_mock):
+ # Ensure expected exception is raised if set_admin_password fails.
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.UPDATING_PASSWORD)
+
+ @mock.patch.object(self.context, 'elevated', return_value=self.context)
+ @mock.patch.object(self.compute.driver, 'set_admin_password',
+ side_effect=exc)
+ def do_test(driver_mock, elevated_mock):
+ # An error raised from the driver should not reveal internal
+ # information, so a new error is raised instead
+ self.assertRaises(expected_exception,
+ self.compute.set_admin_password,
+ self.context,
+ instance=instance,
+ new_pass=None)
+
+ if expected_exception == NotImplementedError:
+ instance_save_mock.assert_called_once_with(
+ expected_task_state=task_states.UPDATING_PASSWORD)
+ else:
+ # setting the instance to error state
+ instance_save_mock.assert_called_once_with()
+
+ self.assertEqual(expected_vm_state, instance.vm_state)
+ # check revert_task_state decorator
+ update_mock.assert_called_once_with(
+ self.context, instance.uuid,
+ task_state=expected_task_state)
+ # check wrap_instance_fault decorator
+ add_fault_mock.assert_called_once_with(
+ self.context, instance, mock.ANY, mock.ANY)
+
+ do_test()
+
+ def test_set_admin_password_driver_not_authorized(self):
+ # Ensure expected exception is raised if set_admin_password not
+ # authorized.
+ exc = exception.Forbidden('Internal error')
+ expected_exception = exception.InstancePasswordSetFailed
+ self._do_test_set_admin_password_driver_error(
+ exc, vm_states.ERROR, None, expected_exception)
+
+ def test_set_admin_password_driver_not_implemented(self):
+ # Ensure expected exception is raised if set_admin_password not
+ # implemented by driver.
+ exc = NotImplementedError()
+ expected_exception = NotImplementedError
+ self._do_test_set_admin_password_driver_error(
+ exc, vm_states.ACTIVE, None, expected_exception)
+
+ def _test_init_host_with_partial_migration(self, task_state=None,
+ vm_state=vm_states.ACTIVE):
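+ # instance_1 carries a migration/resize-related task_state or vm_state
+ # and should be left alone; instance_2 is plain ACTIVE on another host,
+ # so it is the only one destroyed as evacuated.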
+ our_host = self.compute.host
+ instance_1 = objects.Instance(self.context)
+ instance_1.uuid = 'foo'
+ instance_1.task_state = task_state
+ instance_1.vm_state = vm_state
+ instance_1.host = 'not-' + our_host
+ instance_2 = objects.Instance(self.context)
+ instance_2.uuid = 'bar'
+ instance_2.task_state = None
+ instance_2.vm_state = vm_states.ACTIVE
+ instance_2.host = 'not-' + our_host
+
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_get_instances_on_driver',
+ return_value=[instance_1,
+ instance_2]),
+ mock.patch.object(self.compute, '_get_instance_nw_info',
+ return_value=None),
+ mock.patch.object(self.compute, '_get_instance_block_device_info',
+ return_value={}),
+ mock.patch.object(self.compute, '_is_instance_storage_shared',
+ return_value=False),
+ mock.patch.object(self.compute.driver, 'destroy')
+ ) as (_get_instances_on_driver, _get_instance_nw_info,
+ _get_instance_block_device_info, _is_instance_storage_shared,
+ destroy):
+ self.compute._destroy_evacuated_instances(self.context)
+ destroy.assert_called_once_with(self.context, instance_2, None,
+ {}, True)
+
+ def test_init_host_with_partial_migration_migrating(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.MIGRATING)
+
+ def test_init_host_with_partial_migration_resize_migrating(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_MIGRATING)
+
+ def test_init_host_with_partial_migration_resize_migrated(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_MIGRATED)
+
+ def test_init_host_with_partial_migration_finish_resize(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_FINISH)
+
+ def test_init_host_with_partial_migration_resized(self):
+ self._test_init_host_with_partial_migration(
+ vm_state=vm_states.RESIZED)
+
+ @mock.patch('nova.compute.manager.ComputeManager._instance_update')
+ def test_error_out_instance_on_exception_not_implemented_err(self,
+ inst_update_mock):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(
+ self.context, instance, instance_state=vm_states.STOPPED):
+ raise NotImplementedError('test')
+
+ self.assertRaises(NotImplementedError, do_test)
+ inst_update_mock.assert_called_once_with(
+ self.context, instance.uuid,
+ vm_state=vm_states.STOPPED, task_state=None)
+
+ @mock.patch('nova.compute.manager.ComputeManager._instance_update')
+ def test_error_out_instance_on_exception_inst_fault_rollback(self,
+ inst_update_mock):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(self.context,
+ instance):
+ raise exception.InstanceFaultRollback(
+ inner_exception=test.TestingException('test'))
+
+ self.assertRaises(test.TestingException, do_test)
+ inst_update_mock.assert_called_once_with(
+ self.context, instance.uuid,
+ vm_state=vm_states.ACTIVE, task_state=None)
+
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_set_instance_error_state')
+ def test_error_out_instance_on_exception_unknown_with_quotas(self,
+ set_error):
+ instance = fake_instance.fake_instance_obj(self.context)
+ quotas = mock.create_autospec(objects.Quotas, spec_set=True)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(
+ self.context, instance, quotas):
+ raise test.TestingException('test')
+
+ self.assertRaises(test.TestingException, do_test)
+ self.assertEqual(1, len(quotas.method_calls))
+ self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
+ set_error.assert_called_once_with(self.context, instance)
+
+ def test_cleanup_volumes(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': False})
+ bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_do_not_delete_dict, bdm_delete_dict])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete') as volume_delete:
+ self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
+ volume_delete.assert_called_once_with(self.context,
+ bdms[1].volume_id)
+
+ def test_cleanup_volumes_exception_do_not_raise(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_dict1, bdm_dict2])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete',
+ side_effect=[test.TestingException(), None]) as volume_delete:
+ self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
+ raise_exc=False)
+ calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
+ self.assertEqual(calls, volume_delete.call_args_list)
+
+ def test_cleanup_volumes_exception_raise(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_dict1, bdm_dict2])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete',
+ side_effect=[test.TestingException(), None]) as volume_delete:
+ self.assertRaises(test.TestingException,
+ self.compute._cleanup_volumes, self.context, instance.uuid,
+ bdms)
+ calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
+ self.assertEqual(calls, volume_delete.call_args_list)
+
+ def test_start_building(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self.compute, '_instance_update') as update:
+ self.compute._start_building(self.context, instance)
+ update.assert_called_once_with(
+ self.context, instance.uuid, vm_state=vm_states.BUILDING,
+ task_state=None, expected_task_state=(task_states.SCHEDULING,
+ None))
+
+ def _test_prebuild_instance_build_abort_exception(self, exc):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_check_instance_exists'),
+ mock.patch.object(self.compute, '_start_building',
+ side_effect=exc)
+ ) as (
+ check, start
+ ):
+ # run the code
+ self.assertRaises(exception.BuildAbortException,
+ self.compute._prebuild_instance,
+ self.context, instance)
+ # assert the calls
+ check.assert_called_once_with(self.context, instance)
+ start.assert_called_once_with(self.context, instance)
+
+ def test_prebuild_instance_instance_not_found(self):
+ self._test_prebuild_instance_build_abort_exception(
+ exception.InstanceNotFound(instance_id='fake'))
+
+ def test_prebuild_instance_unexpected_deleting_task_state_err(self):
+ self._test_prebuild_instance_build_abort_exception(
+ exception.UnexpectedDeletingTaskStateError(expected='foo',
+ actual='bar'))
+
+ def test_stop_instance_task_state_none_power_state_shutdown(self):
+ # Tests that stop_instance does not fail when the instance power_state
+ # is SHUTDOWN and the task_state is None.
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE,
+ task_state=None, power_state=power_state.SHUTDOWN)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ @mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.SHUTDOWN)
+ @mock.patch.object(self.compute, '_notify_about_instance_usage')
+ @mock.patch.object(self.compute, '_power_off_instance')
+ @mock.patch.object(instance, 'save')
+ def do_test(save_mock, power_off_mock, notify_mock, get_state_mock,
+ event_finish_mock, event_start_mock):
+ # run the code
+ self.compute.stop_instance(self.context, instance)
+ # assert the calls
+ self.assertEqual(2, get_state_mock.call_count)
+ notify_mock.assert_has_calls([
+ mock.call(self.context, instance, 'power_off.start'),
+ mock.call(self.context, instance, 'power_off.end')
+ ])
+ power_off_mock.assert_called_once_with(
+ self.context, instance, True)
+ save_mock.assert_called_once_with(
+ expected_task_state=[task_states.POWERING_OFF, None])
+ self.assertEqual(power_state.SHUTDOWN, instance.power_state)
+ self.assertIsNone(instance.task_state)
+ self.assertEqual(vm_states.STOPPED, instance.vm_state)
+
+ do_test()
+
+ def test_reset_network_driver_not_implemented(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ @mock.patch.object(self.compute.driver, 'reset_network',
+ side_effect=NotImplementedError())
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def do_test(mock_add_fault, mock_reset):
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.reset_network,
+ self.context,
+ instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.reset_network,
+ self.context,
+ instance)
+
+ do_test()
+
+ def test_rebuild_default_impl(self):
+ def _detach(context, bdms):
+ pass
+
+ def _attach(context, instance, bdms, do_check_attach=True):
+ return {'block_device_mapping': 'shared_block_storage'}
+
+ def _spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ self.assertEqual(block_device_info['block_device_mapping'],
+ 'shared_block_storage')
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'destroy',
+ return_value=None),
+ mock.patch.object(self.compute.driver, 'spawn',
+ side_effect=_spawn),
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None)
+ ) as (
+ mock_destroy,
+ mock_spawn,
+ mock_save
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._rebuild_default_impl(self.context,
+ instance,
+ None,
+ [],
+ admin_password='new_pass',
+ bdms=[],
+ detach_block_devices=_detach,
+ attach_block_devices=_attach,
+ network_info=None,
+ recreate=True,
+ block_device_info=None,
+ preserve_ephemeral=False)
+
+ self.assertFalse(mock_destroy.called)
+ self.assertTrue(mock_save.called)
+ self.assertTrue(mock_spawn.called)
+
+
+class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeManagerBuildInstanceTestCase, self).setUp()
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ self.admin_pass = 'pass'
+ self.injected_files = []
+ self.image = {}
+ self.node = 'fake-node'
+ self.limits = {}
+ self.requested_networks = []
+ self.security_groups = []
+ self.block_device_mapping = []
+ self.filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': [[self.compute.host,
+ 'fake-node']]}}
+
+ def fake_network_info():
+ return network_model.NetworkInfo()
+
+ self.network_info = network_model.NetworkInfoAsyncWrapper(
+ fake_network_info)
+ self.block_device_info = self.compute._prep_block_device(self.context,
+ self.instance, self.block_device_mapping)
+
+ # override tracker with a version that doesn't need the database:
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, self.node)
+ self.compute._resource_tracker_dict[self.node] = fake_rt
+
+ def _do_build_instance_update(self, reschedule_update=False):
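+ # Record the instance.save() expectations for the build path; a
+ # reschedule adds one more save.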
+ self.mox.StubOutWithMock(self.instance, 'save')
+ self.instance.save(
+ expected_task_state=(task_states.SCHEDULING, None)).AndReturn(
+ self.instance)
+ if reschedule_update:
+ self.instance.save().AndReturn(self.instance)
+
+ def _build_and_run_instance_update(self):
+ self.mox.StubOutWithMock(self.instance, 'save')
+ self._build_resources_instance_update(stub=False)
+ self.instance.save(
+ expected_task_state=task_states.BLOCK_DEVICE_MAPPING).AndReturn(
+ self.instance)
+
+ def _build_resources_instance_update(self, stub=True):
+ if stub:
+ self.mox.StubOutWithMock(self.instance, 'save')
+ self.instance.save().AndReturn(self.instance)
+
+ def _notify_about_instance_usage(self, event, stub=True, **kwargs):
+ if stub:
+ self.mox.StubOutWithMock(self.compute,
+ '_notify_about_instance_usage')
+ self.compute._notify_about_instance_usage(self.context, self.instance,
+ event, **kwargs)
+
+ def _instance_action_events(self):
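+ # Expect the instance-action start/finish events recorded around the
+ # build.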
+ self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start')
+ self.mox.StubOutWithMock(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ objects.InstanceActionEvent.event_start(
+ self.context, self.instance.uuid, mox.IgnoreArg(),
+ want_result=False)
+ objects.InstanceActionEvent.event_finish_with_failure(
+ self.context, self.instance.uuid, mox.IgnoreArg(),
+ exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
+ want_result=False)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_build_and_run_instance_called_with_proper_args(self, mock_spawn):
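+ # Make the mocked spawn_n run the build synchronously so the test can
+ # assert on its effects.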
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self._do_build_instance_update()
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ # This test verifies that when an icehouse-compatible RPC call is sent
+ # to a juno compute node, the NetworkRequest object can be loaded from
+ # a three-item tuple.
+ @mock.patch('nova.objects.InstanceActionEvent.event_finish_with_failure')
+ @mock.patch('nova.objects.InstanceActionEvent.event_start')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
+ @mock.patch('nova.utils.spawn_n')
+ def test_build_and_run_instance_with_icehouse_requested_network(
+ self, mock_spawn, mock_build_and_run, mock_save, mock_event_start,
+ mock_event_finish):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ mock_save.return_value = self.instance
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=[('fake_network_id', '10.0.0.1',
+ 'fake_port_id')],
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+ requested_network = mock_build_and_run.call_args[0][5][0]
+ self.assertEqual('fake_network_id', requested_network.network_id)
+ self.assertEqual('10.0.0.1', str(requested_network.address))
+ self.assertEqual('fake_port_id', requested_network.port_id)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_build_abort_exception(self, mock_spawn):
+ def fake_spawn(f, *args, **kwargs):
+ # NOTE(danms): Simulate the detached nature of spawn so that
+ # we confirm that the inner task has the fault logic
+ try:
+ return f(*args, **kwargs)
+ except Exception:
+ pass
+
+ mock_spawn.side_effect = fake_spawn
+
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self._do_build_instance_update()
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties).AndRaise(
+ exception.BuildAbortException(reason='',
+ instance_uuid=self.instance.uuid))
+ self.compute._cleanup_allocated_networks(self.context, self.instance,
+ self.requested_networks)
+ self.compute._cleanup_volumes(self.context, self.instance.uuid,
+ self.block_device_mapping, raise_exc=False)
+ compute_utils.add_instance_fault_from_exc(self.context,
+ self.instance, mox.IgnoreArg(), mox.IgnoreArg())
+ self.compute._set_instance_error_state(self.context, self.instance)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_rescheduled_exception(self, mock_spawn):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self._do_build_instance_update(reschedule_update=True)
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties).AndRaise(
+ exception.RescheduledException(reason='',
+ instance_uuid=self.instance.uuid))
+ self.compute.compute_task_api.build_instances(self.context,
+ [self.instance], self.image, self.filter_properties,
+ self.admin_pass, self.injected_files, self.requested_networks,
+ self.security_groups, self.block_device_mapping)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ def test_rescheduled_exception_with_non_ascii_exception(self):
+ exc = exception.NovaException(u's\xe9quence')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
+ 'instance_update')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self._build_and_run_instance_update()
+ self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.injected_files, self.admin_pass,
+ network_info=self.network_info,
+ block_device_info=self.block_device_info).AndRaise(exc)
+ self._notify_about_instance_usage('create.error',
+ fault=exc, stub=False)
+ conductor_rpcapi.ConductorAPI.instance_update(
+ self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.RescheduledException,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_rescheduled_exception_without_retry(self, mock_spawn):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self._do_build_instance_update()
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ {}).AndRaise(
+ exception.RescheduledException(reason='',
+ instance_uuid=self.instance.uuid))
+ self.compute._cleanup_allocated_networks(self.context, self.instance,
+ self.requested_networks)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.compute._set_instance_error_state(self.context,
+ self.instance)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties={},
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_rescheduled_exception_do_not_deallocate_network(self, mock_spawn):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'deallocate_networks_on_reschedule')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self._do_build_instance_update(reschedule_update=True)
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties).AndRaise(
+ exception.RescheduledException(reason='',
+ instance_uuid=self.instance.uuid))
+ self.compute.driver.deallocate_networks_on_reschedule(
+ self.instance).AndReturn(False)
+ self.compute.compute_task_api.build_instances(self.context,
+ [self.instance], self.image, self.filter_properties,
+ self.admin_pass, self.injected_files, self.requested_networks,
+ self.security_groups, self.block_device_mapping)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_rescheduled_exception_deallocate_network(self, mock_spawn):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'deallocate_networks_on_reschedule')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self._do_build_instance_update(reschedule_update=True)
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties).AndRaise(
+ exception.RescheduledException(reason='',
+ instance_uuid=self.instance.uuid))
+ self.compute.driver.deallocate_networks_on_reschedule(
+ self.instance).AndReturn(True)
+ self.compute._cleanup_allocated_networks(self.context, self.instance,
+ self.requested_networks)
+ self.compute.compute_task_api.build_instances(self.context,
+ [self.instance], self.image, self.filter_properties,
+ self.admin_pass, self.injected_files, self.requested_networks,
+ self.security_groups, self.block_device_mapping)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ def _test_build_and_run_exceptions(self, exc, set_error=False,
+ cleanup_volumes=False):
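+ # Every exception path cleans up allocated networks; cleanup_volumes
+ # and set_error additionally expect the volumes to be cleaned up and
+ # the instance to be put into ERROR state with a fault recorded.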
+ self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self._do_build_instance_update()
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties).AndRaise(exc)
+ self.compute._cleanup_allocated_networks(self.context, self.instance,
+ self.requested_networks)
+ if cleanup_volumes:
+ self.compute._cleanup_volumes(self.context, self.instance.uuid,
+ self.block_device_mapping, raise_exc=False)
+ if set_error:
+ self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
+ self.mox.StubOutWithMock(compute_utils,
+ 'add_instance_fault_from_exc')
+ compute_utils.add_instance_fault_from_exc(self.context,
+ self.instance, mox.IgnoreArg(), mox.IgnoreArg())
+ self.compute._set_instance_error_state(self.context, self.instance)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ with mock.patch('nova.utils.spawn_n') as mock_spawn:
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ def test_build_and_run_notfound_exception(self):
+ self._test_build_and_run_exceptions(exception.InstanceNotFound(
+ instance_id=''))
+
+ def test_build_and_run_unexpecteddeleting_exception(self):
+ self._test_build_and_run_exceptions(
+ exception.UnexpectedDeletingTaskStateError(expected='',
+ actual=''))
+
+ def test_build_and_run_buildabort_exception(self):
+ self._test_build_and_run_exceptions(exception.BuildAbortException(
+ instance_uuid='', reason=''), set_error=True, cleanup_volumes=True)
+
+ def test_build_and_run_unhandled_exception(self):
+ self._test_build_and_run_exceptions(test.TestingException(),
+ set_error=True, cleanup_volumes=True)
+
+ def test_instance_not_found(self):
+ exc = exception.InstanceNotFound(instance_id=1)
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
+ 'instance_update')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self._build_and_run_instance_update()
+ self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.injected_files, self.admin_pass,
+ network_info=self.network_info,
+ block_device_info=self.block_device_info).AndRaise(exc)
+ self._notify_about_instance_usage('create.end',
+ fault=exc, stub=False)
+ conductor_rpcapi.ConductorAPI.instance_update(
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
+ def test_reschedule_on_exception(self):
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
+ 'instance_update')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self._build_and_run_instance_update()
+ exc = test.TestingException()
+ self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.injected_files, self.admin_pass,
+ network_info=self.network_info,
+ block_device_info=self.block_device_info).AndRaise(exc)
+ conductor_rpcapi.ConductorAPI.instance_update(
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
+ self._notify_about_instance_usage('create.error',
+ fault=exc, stub=False)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.RescheduledException,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
+ def test_spawn_network_alloc_failure(self):
+ # Because network allocation is asynchronous, failures may not present
+ # themselves until the virt spawn method is called.
+ self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks())
+
+ def test_build_and_run_flavor_disk_too_small_exception(self):
+ self._test_build_and_run_spawn_exceptions(
+ exception.FlavorDiskTooSmall())
+
+ def test_build_and_run_flavor_memory_too_small_exception(self):
+ self._test_build_and_run_spawn_exceptions(
+ exception.FlavorMemoryTooSmall())
+
+ def test_build_and_run_image_not_active_exception(self):
+ self._test_build_and_run_spawn_exceptions(
+ exception.ImageNotActive(image_id=self.image.get('id')))
+
+ def test_build_and_run_image_unacceptable_exception(self):
+ self._test_build_and_run_spawn_exceptions(
+ exception.ImageUnacceptable(image_id=self.image.get('id'),
+ reason=""))
+
+ def _test_build_and_run_spawn_exceptions(self, exc):
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'spawn',
+ side_effect=exc),
+ mock.patch.object(conductor_rpcapi.ConductorAPI,
+ 'instance_update'),
+ mock.patch.object(self.instance, 'save',
+ side_effect=[self.instance, self.instance]),
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info),
+ mock.patch.object(self.compute,
+ '_notify_about_instance_usage'),
+ mock.patch.object(self.compute,
+ '_shutdown_instance'),
+ mock.patch.object(self.compute,
+ '_validate_instance_group_policy')
+ ) as (spawn, instance_update, save,
+ _build_networks_for_instance, _notify_about_instance_usage,
+ _shutdown_instance, _validate_instance_group_policy):
+
+ self.assertRaises(exception.BuildAbortException,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks,
+ self.security_groups, self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
+ _validate_instance_group_policy.assert_called_once_with(
+ self.context, self.instance, self.filter_properties)
+ _build_networks_for_instance.assert_has_calls([
+ mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups)])
+
+ _notify_about_instance_usage.assert_has_calls([
+ mock.call(self.context, self.instance, 'create.start',
+ extra_usage_info={'image_name': self.image.get('name')}),
+ mock.call(self.context, self.instance, 'create.error',
+ fault=exc)])
+
+ save.assert_has_calls([
+ mock.call(),
+ mock.call(
+ expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
+
+ spawn.assert_has_calls([mock.call(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ network_info=self.network_info,
+ block_device_info=self.block_device_info)])
+
+ instance_update.assert_has_calls([mock.call(self.context,
+ self.instance.uuid, mock.ANY, 'conductor')])
+
+ _shutdown_instance.assert_called_once_with(self.context,
+ self.instance, self.block_device_mapping,
+ self.requested_networks, try_deallocate_networks=False)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_power_state')
+ def test_spawn_waits_for_network_and_saves_info_cache(self, gps):
+ inst = mock.MagicMock()
+ network_info = mock.MagicMock()
+ with mock.patch.object(self.compute, 'driver'):
+ self.compute._spawn(self.context, inst, {}, network_info, None,
+ None, None)
+ network_info.wait.assert_called_once_with(do_raise=True)
+ self.assertEqual(network_info, inst.info_cache.network_info)
+ inst.save.assert_called_with(expected_task_state=task_states.SPAWNING)
+
+ @mock.patch('nova.utils.spawn_n')
+ def test_reschedule_on_resources_unavailable(self, mock_spawn):
+ mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
+ reason = 'resource unavailable'
+ exc = exception.ComputeResourcesUnavailable(reason=reason)
+
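+ # A stand-in resource tracker whose claim always fails, which should
+ # send the build down the reschedule path.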
+ class FakeResourceTracker(object):
+ def instance_claim(self, context, instance, limits):
+ raise exc
+
+ self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
+ self.mox.StubOutWithMock(self.compute.compute_task_api,
+ 'build_instances')
+ self.compute._get_resource_tracker(self.node).AndReturn(
+ FakeResourceTracker())
+ self._do_build_instance_update(reschedule_update=True)
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self._notify_about_instance_usage('create.error',
+ fault=exc, stub=False)
+ self.compute.compute_task_api.build_instances(self.context,
+ [self.instance], self.image, self.filter_properties,
+ self.admin_pass, self.injected_files, self.requested_networks,
+ self.security_groups, self.block_device_mapping)
+ self._instance_action_events()
+ self.mox.ReplayAll()
+
+ self.compute.build_and_run_instance(self.context, self.instance,
+ self.image, request_spec={},
+ filter_properties=self.filter_properties,
+ injected_files=self.injected_files,
+ admin_password=self.admin_pass,
+ requested_networks=self.requested_networks,
+ security_groups=self.security_groups,
+ block_device_mapping=self.block_device_mapping, node=self.node,
+ limits=self.limits)
+
+ def test_build_resources_buildabort_reraise(self):
+ exc = exception.BuildAbortException(
+ instance_uuid=self.instance.uuid, reason='')
+ self.mox.StubOutWithMock(self.compute, '_build_resources')
+ self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
+ 'instance_update')
+ conductor_rpcapi.ConductorAPI.instance_update(
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups, self.image,
+ self.block_device_mapping).AndRaise(exc)
+ self._notify_about_instance_usage('create.error',
+ fault=exc, stub=False)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.BuildAbortException,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks,
+ self.security_groups, self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
+ def test_build_resources_reraises_on_failed_bdm_prep(self):
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self._build_resources_instance_update()
+ self.compute._prep_block_device(self.context, self.instance,
+ self.block_device_mapping).AndRaise(test.TestingException())
+ self.mox.ReplayAll()
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e, exception.BuildAbortException)
+
+ def test_failed_bdm_prep_from_delete_raises_unexpected(self):
+ with contextlib.nested(
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info),
+ mock.patch.object(self.instance, 'save',
+ side_effect=exception.UnexpectedDeletingTaskStateError(
+ actual=task_states.DELETING, expected='None')),
+ ) as (_build_networks_for_instance, save):
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.UnexpectedDeletingTaskStateError)
+
+ _build_networks_for_instance.assert_has_calls([
+ mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups)])
+
+ save.assert_has_calls([mock.call()])
+
+ def test_build_resources_aborts_on_failed_network_alloc(self):
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndRaise(
+ test.TestingException())
+ self.mox.ReplayAll()
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups, self.image,
+ self.block_device_mapping):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e, exception.BuildAbortException)
+
+ def test_failed_network_alloc_from_delete_raises_unexpected(self):
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance') as _build_networks:
+
+ exc = exception.UnexpectedDeletingTaskStateError
+ _build_networks.side_effect = exc(actual=task_states.DELETING,
+ expected='None')
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e, exc)
+
+ _build_networks.assert_has_calls([
+ mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups)])
+
+ def test_build_resources_with_network_info_obj_on_spawn_failure(self):
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ network_model.NetworkInfo())
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._build_resources_instance_update()
+ self.mox.ReplayAll()
+
+ test_exception = test.TestingException()
+
+ def fake_spawn():
+ raise test_exception
+
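+ # The spawn failure inside the context manager should trigger the
+ # recorded _shutdown_instance call and then be re-raised unchanged.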
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ fake_spawn()
+ except Exception as e:
+ self.assertEqual(test_exception, e)
+
+ def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._build_resources_instance_update()
+ self.mox.ReplayAll()
+
+ test_exception = test.TestingException()
+
+ def fake_spawn():
+ raise test_exception
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ fake_spawn()
+ except Exception as e:
+ self.assertEqual(test_exception, e)
+
+ @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager._build_networks_for_instance')
+ @mock.patch('nova.objects.Instance.save')
+ def test_build_resources_instance_not_found_before_yield(
+ self, mock_save, mock_build_network, mock_info_wait):
+ mock_build_network.return_value = self.network_info
+ expected_exc = exception.InstanceNotFound(
+ instance_id=self.instance.uuid)
+ mock_save.side_effect = expected_exc
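+ # instance.save() raises before _build_resources yields, so the bare
+ # raise in the body is never reached; the original InstanceNotFound
+ # should propagate and the async network wait must not raise.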
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ raise
+ except Exception as e:
+ self.assertEqual(expected_exc, e)
+ mock_build_network.assert_called_once_with(self.context, self.instance,
+ self.requested_networks, self.security_groups)
+ mock_info_wait.assert_called_once_with(do_raise=False)
+
+ @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager._build_networks_for_instance')
+ @mock.patch('nova.objects.Instance.save')
+ def test_build_resources_unexpected_task_error_before_yield(
+ self, mock_save, mock_build_network, mock_info_wait):
+ mock_build_network.return_value = self.network_info
+ mock_save.side_effect = exception.UnexpectedTaskStateError(
+ expected='', actual='')
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ raise
+ except exception.BuildAbortException:
+ pass
+ mock_build_network.assert_called_once_with(self.context, self.instance,
+ self.requested_networks, self.security_groups)
+ mock_info_wait.assert_called_once_with(do_raise=False)
+
+ @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager._build_networks_for_instance')
+ @mock.patch('nova.objects.Instance.save')
+ def test_build_resources_exception_before_yield(
+ self, mock_save, mock_build_network, mock_info_wait):
+ mock_build_network.return_value = self.network_info
+ mock_save.side_effect = Exception()
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ raise
+ except exception.BuildAbortException:
+ pass
+ mock_build_network.assert_called_once_with(self.context, self.instance,
+ self.requested_networks, self.security_groups)
+ mock_info_wait.assert_called_once_with(do_raise=False)
+
+ def test_build_resources_aborts_on_cleanup_failure(self):
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False).AndRaise(
+ test.TestingException())
+ self._build_resources_instance_update()
+ self.mox.ReplayAll()
+
+ def fake_spawn():
+ raise test.TestingException()
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ fake_spawn()
+ except Exception as e:
+ self.assertIsInstance(e, exception.BuildAbortException)
+
+ def test_build_networks_if_not_allocated(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ system_metadata={},
+ expected_attrs=['system_metadata'])
+
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_allocate_network')
+ self.compute._allocate_network(self.context, instance,
+ self.requested_networks, None, self.security_groups, None)
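+ # _get_instance_nw_info is stubbed with no recorded call, so a fresh
+ # allocation is expected when system_metadata has no
+ # 'network_allocated' flag.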
+ self.mox.ReplayAll()
+
+ self.compute._build_networks_for_instance(self.context, instance,
+ self.requested_networks, self.security_groups)
+
+ def test_build_networks_if_allocated_false(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ system_metadata=dict(network_allocated='False'),
+ expected_attrs=['system_metadata'])
+
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_allocate_network')
+ self.compute._allocate_network(self.context, instance,
+ self.requested_networks, None, self.security_groups, None)
+ self.mox.ReplayAll()
+
+ self.compute._build_networks_for_instance(self.context, instance,
+ self.requested_networks, self.security_groups)
+
+ def test_return_networks_if_found(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ system_metadata=dict(network_allocated='True'),
+ expected_attrs=['system_metadata'])
+
+ def fake_network_info():
+ return network_model.NetworkInfo([{'address': '123.123.123.123'}])
+
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_allocate_network')
+ self.compute._get_instance_nw_info(self.context, instance).AndReturn(
+ network_model.NetworkInfoAsyncWrapper(fake_network_info))
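+ # With network_allocated='True' the cached network info is returned;
+ # _allocate_network is stubbed with no recorded call, so reallocating
+ # would fail the mox verification.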
+ self.mox.ReplayAll()
+
+ self.compute._build_networks_for_instance(self.context, instance,
+ self.requested_networks, self.security_groups)
+
+ def test_cleanup_allocated_networks_instance_not_found(self):
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_deallocate_network'),
+ mock.patch.object(self.instance, 'save',
+ side_effect=exception.InstanceNotFound(instance_id=''))
+ ) as (_deallocate_network, save):
+ # Test that this does not raise an exception.
+ self.compute._cleanup_allocated_networks(self.context,
+ self.instance, self.requested_networks)
+ save.assert_called_once_with()
+ self.assertEqual('False',
+ self.instance.system_metadata['network_allocated'])
+
+ @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+ def test_launched_at_in_create_end_notification(self,
+ mock_instance_update):
+
+ def fake_notify(*args, **kwargs):
+ if args[2] == 'create.end':
+ # Check that launched_at is set on the instance
+ self.assertIsNotNone(args[1].launched_at)
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'spawn'),
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance', return_value=[]),
+ mock.patch.object(self.instance, 'save'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage',
+ side_effect=fake_notify)
+ ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties)
+ expected_call = mock.call(self.context, self.instance,
+ 'create.end', extra_usage_info={'message': u'Success'},
+ network_info=[])
+ create_end_call = mock_notify.call_args_list[
+ mock_notify.call_count - 1]
+ self.assertEqual(expected_call, create_end_call)
+
+ @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+ def test_create_end_on_instance_delete(self, mock_instance_update):
+
+ def fake_notify(*args, **kwargs):
+ if args[2] == 'create.end':
+ # Check that launched_at is set on the instance
+ self.assertIsNotNone(args[1].launched_at)
+
+ exc = exception.InstanceNotFound(instance_id='')
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'spawn'),
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance', return_value=[]),
+ mock.patch.object(self.instance, 'save',
+ side_effect=[None, None, exc]),
+ mock.patch.object(self.compute, '_notify_about_instance_usage',
+ side_effect=fake_notify)
+ ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks,
+ self.security_groups, self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+ expected_call = mock.call(self.context, self.instance,
+ 'create.end', fault=exc)
+ create_end_call = mock_notify.call_args_list[
+ mock_notify.call_count - 1]
+ self.assertEqual(expected_call, create_end_call)
+
+
+class ComputeManagerMigrationTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeManagerMigrationTestCase, self).setUp()
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.context = context.RequestContext('fake', 'fake')
+ self.image = {}
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ self.migration = objects.Migration()
+ self.migration.status = 'migrating'
+
+ def test_finish_resize_failure(self):
+ elevated_context = self.context.elevated()
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_finish_resize',
+ side_effect=exception.ResizeError(reason='')),
+ mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+ mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure'),
+ mock.patch.object(db, 'instance_fault_create'),
+ mock.patch.object(self.compute, '_instance_update'),
+ mock.patch.object(self.migration, 'save'),
+ mock.patch.object(self.context, 'elevated',
+ return_value=elevated_context)
+ ) as (meth, event_start, event_finish, fault_create, instance_update,
+ migration_save, context_elevated):
+ fault_create.return_value = (
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ self.assertRaises(
+ exception.ResizeError, self.compute.finish_resize,
+ context=self.context, disk_info=[], image=self.image,
+ instance=self.instance, reservations=[],
+ migration=self.migration
+ )
+ self.assertEqual("error", self.migration.status)
+ migration_save.assert_has_calls([mock.call(elevated_context)])
+
+ def test_resize_instance_failure(self):
+ elevated_context = self.context.elevated()
+ self.migration.dest_host = None
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver,
+ 'migrate_disk_and_power_off',
+ side_effect=exception.ResizeError(reason='')),
+ mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+ mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure'),
+ mock.patch.object(db, 'instance_fault_create'),
+ mock.patch.object(self.compute, '_instance_update'),
+ mock.patch.object(self.migration, 'save'),
+ mock.patch.object(self.context, 'elevated',
+ return_value=elevated_context),
+ mock.patch.object(self.compute, '_get_instance_nw_info',
+ return_value=None),
+ mock.patch.object(self.instance, 'save'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute,
+ '_get_instance_block_device_info',
+ return_value=None),
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid',
+ return_value=None)
+ ) as (meth, event_start, event_finish, fault_create, instance_update,
+ migration_save, context_elevated, nw_info, save_inst, notify,
+ vol_block_info, bdm):
+ fault_create.return_value = (
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ self.assertRaises(
+ exception.ResizeError, self.compute.resize_instance,
+ context=self.context, instance=self.instance, image=self.image,
+ reservations=[], migration=self.migration, instance_type='type'
+ )
+ self.assertEqual("error", self.migration.status)
+ migration_save.assert_has_calls([mock.call(elevated_context)])
diff --git a/nova/tests/unit/compute/test_compute_utils.py b/nova/tests/unit/compute/test_compute_utils.py
new file mode 100644
index 0000000000..6234ae30f6
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_utils.py
@@ -0,0 +1,827 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests For miscellaneous util methods used with compute."""
+
+import copy
+import string
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+import six
+import testtools
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova.network import api as network_api
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.objects import instance as instance_obj
+from nova import rpc
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova import utils
+from nova.virt import driver
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class ComputeValidateDeviceTestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeValidateDeviceTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ # Check whether the test name includes "xen".
+ if 'xen' in self.id():
+ self.flags(compute_driver='xenapi.XenAPIDriver')
+ self.instance = {
+ 'uuid': 'fake',
+ 'root_device_name': None,
+ 'instance_type_id': 'fake',
+ }
+ else:
+ self.instance = {
+ 'uuid': 'fake',
+ 'root_device_name': '/dev/vda',
+ 'default_ephemeral_device': '/dev/vdb',
+ 'instance_type_id': 'fake',
+ }
+ self.data = []
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance, use_slave=False: self.data)
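+ # Each test fills self.data with fake BDMs to simulate the device
+ # names that are already in use.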
+
+ def _update_flavor(self, flavor_info):
+ self.flavor = {
+ 'id': 1,
+ 'name': 'foo',
+ 'memory_mb': 128,
+ 'vcpus': 1,
+ 'root_gb': 10,
+ 'ephemeral_gb': 10,
+ 'flavorid': 1,
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ }
+ self.flavor.update(flavor_info)
+ self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
+ 'value': value}
+ for key, value in
+ self.flavor.items()]
+
+ def _validate_device(self, device=None):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, self.instance['uuid'])
+ return compute_utils.get_device_name_for_instance(
+ self.context, self.instance, bdms, device)
+
+ @staticmethod
+ def _fake_bdm(device):
+ return fake_block_device.FakeDbBlockDeviceDict({
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': device,
+ 'no_device': None,
+ 'volume_id': 'fake',
+ 'snapshot_id': None,
+ 'guest_format': None
+ })
+
+ def test_wrap(self):
+ self.data = []
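+ # Occupy /dev/vdc through /dev/vdz so the next free name wraps to
+ # /dev/vdaa.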
+ for letter in string.ascii_lowercase[2:]:
+ self.data.append(self._fake_bdm('/dev/vd' + letter))
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdaa')
+
+ def test_wrap_plus_one(self):
+ self.data = []
+ for letter in string.ascii_lowercase[2:]:
+ self.data.append(self._fake_bdm('/dev/vd' + letter))
+ self.data.append(self._fake_bdm('/dev/vdaa'))
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdab')
+
+ def test_later(self):
+ self.data = [
+ self._fake_bdm('/dev/vdc'),
+ self._fake_bdm('/dev/vdd'),
+ self._fake_bdm('/dev/vde'),
+ ]
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdf')
+
+ def test_gap(self):
+ self.data = [
+ self._fake_bdm('/dev/vdc'),
+ self._fake_bdm('/dev/vde'),
+ ]
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdd')
+
+ def test_no_bdms(self):
+ self.data = []
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdc')
+
+ def test_lxc_names_work(self):
+ self.instance['root_device_name'] = '/dev/a'
+ self.instance['ephemeral_device_name'] = '/dev/b'
+ self.data = []
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/c')
+
+ def test_name_conversion(self):
+ self.data = []
+ device = self._validate_device('/dev/c')
+ self.assertEqual(device, '/dev/vdc')
+ device = self._validate_device('/dev/sdc')
+ self.assertEqual(device, '/dev/vdc')
+ device = self._validate_device('/dev/xvdc')
+ self.assertEqual(device, '/dev/vdc')
+
+ def test_invalid_device_prefix(self):
+ self.assertRaises(exception.InvalidDevicePath,
+ self._validate_device, '/baddata/vdc')
+
+ def test_device_in_use(self):
+ exc = self.assertRaises(exception.DevicePathInUse,
+ self._validate_device, '/dev/vda')
+ self.assertIn('/dev/vda', six.text_type(exc))
+
+ def test_swap(self):
+ self.instance['default_swap_device'] = "/dev/vdc"
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdd')
+
+ def test_swap_no_ephemeral(self):
+ del self.instance['default_ephemeral_device']
+ self.instance['default_swap_device'] = "/dev/vdb"
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdc')
+
+ def test_ephemeral_xenapi(self):
+ self._update_flavor({
+ 'ephemeral_gb': 10,
+ 'swap': 0,
+ })
+ self.stubs.Set(flavors, 'get_flavor',
+ lambda instance_type_id, ctxt=None: self.flavor)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/xvdc')
+
+ def test_swap_xenapi(self):
+ self._update_flavor({
+ 'ephemeral_gb': 0,
+ 'swap': 10,
+ })
+ self.stubs.Set(flavors, 'get_flavor',
+ lambda instance_type_id, ctxt=None: self.flavor)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/xvdb')
+
+ def test_swap_and_ephemeral_xenapi(self):
+ self._update_flavor({
+ 'ephemeral_gb': 10,
+ 'swap': 10,
+ })
+ self.stubs.Set(flavors, 'get_flavor',
+ lambda instance_type_id, ctxt=None: self.flavor)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/xvdd')
+
+ def test_swap_and_one_attachment_xenapi(self):
+ self._update_flavor({
+ 'ephemeral_gb': 0,
+ 'swap': 10,
+ })
+ self.stubs.Set(flavors, 'get_flavor',
+ lambda instance_type_id, ctxt=None: self.flavor)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/xvdb')
+ self.data.append(self._fake_bdm(device))
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/xvdd')
+
+ def test_no_dev_root_device_name_get_next_name(self):
+ self.instance['root_device_name'] = 'vda'
+ device = self._validate_device()
+ self.assertEqual('/dev/vdc', device)
+
+
+class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.ephemerals = block_device_obj.block_device_make_list(
+ self.context,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'boot_index': -1})])
+
+ self.swap = block_device_obj.block_device_make_list(
+ self.context,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'boot_index': -1})])
+
+ self.block_device_mapping = block_device_obj.block_device_make_list(
+ self.context,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1})])
+ self.flavor = {'swap': 4}
+ self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
+ self.is_libvirt = False
+ self.root_device_name = '/dev/vda'
+ self.update_called = False
+
+ def fake_extract_flavor(instance):
+ return self.flavor
+
+ def fake_driver_matches(driver_string):
+ if driver_string == 'libvirt.LibvirtDriver':
+ return self.is_libvirt
+ return False
+
+ self.patchers = []
+ self.patchers.append(
+ mock.patch.object(objects.BlockDeviceMapping, 'save'))
+ self.patchers.append(
+ mock.patch.object(
+ flavors, 'extract_flavor',
+ new=mock.Mock(side_effect=fake_extract_flavor)))
+ self.patchers.append(
+ mock.patch.object(driver,
+ 'compute_driver_matches',
+ new=mock.Mock(
+ side_effect=fake_driver_matches)))
+ for patcher in self.patchers:
+ patcher.start()
+
+ def tearDown(self):
+ super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
+ for patcher in self.patchers:
+ patcher.stop()
+
+ def _test_default_device_names(self, *block_device_lists):
+ compute_utils.default_device_names_for_instance(self.instance,
+ self.root_device_name,
+ *block_device_lists)
+
+ def test_only_block_device_mapping(self):
+ # Test no-op
+ original_bdm = copy.deepcopy(self.block_device_mapping)
+ self._test_default_device_names([], [], self.block_device_mapping)
+ for original, new in zip(original_bdm, self.block_device_mapping):
+ self.assertEqual(original.device_name, new.device_name)
+
+ # Assert the missing device names are defaulted as expected
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], [], self.block_device_mapping)
+ self.assertEqual('/dev/vdb',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_ephemerals(self):
+ # Test ephemeral gets assigned
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_swap(self):
+ # Test swap only
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names([], self.swap, [])
+ self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
+
+ # Test swap and block_device_mapping
+ self.swap[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], self.swap,
+ self.block_device_mapping)
+ self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_all_together(self):
+ # Test swap missing
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+
+ # Test swap and eph missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+ self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+
+ # Test all missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+ self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+
+class UsageInfoTestCase(test.TestCase):
+
+ def setUp(self):
+ def fake_get_nw_info(cls, ctxt, instance):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ super(UsageInfoTestCase, self).setUp()
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ self.flags(use_local=True, group='conductor')
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ network_manager='nova.network.manager.FlatManager')
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(nova.tests.unit.image.fake._FakeImageService,
+ 'show', fake_show)
+ fake_network.set_stub_network_methods(self.stubs)
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def _create_instance(self, params=None):
+ """Create a test instance."""
+ params = params or {}
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ sys_meta = flavors.save_flavor_info({}, flavor)
+ inst = {}
+ inst['image_ref'] = 1
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = flavor['id']
+ inst['system_metadata'] = sys_meta
+ inst['ami_launch_index'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['info_cache'] = {'network_info': '[]'}
+ inst.update(params)
+ return db.instance_create(self.context, inst)['id']
+
+ def test_notify_usage_exists(self):
+ # Ensure 'exists' notification generates appropriate usage data.
+ instance_id = self._create_instance()
+ instance = objects.Instance.get_by_id(self.context, instance_id)
+ # Set some system metadata
+ sys_metadata = {'image_md_key1': 'val1',
+ 'image_md_key2': 'val2',
+ 'other_data': 'meow'}
+ instance.system_metadata.update(sys_metadata)
+ instance.save()
+ compute_utils.notify_usage_exists(
+ rpc.get_notifier('compute'), self.context, instance)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.exists')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ for attr in ('display_name', 'created_at', 'launched_at',
+ 'state', 'state_description',
+ 'bandwidth', 'audit_period_beginning',
+ 'audit_period_ending', 'image_meta'):
+ self.assertTrue(attr in payload,
+ msg="Key %s not in payload" % attr)
+ self.assertEqual(payload['image_meta'],
+ {'md_key1': 'val1', 'md_key2': 'val2'})
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_notify_usage_exists_deleted_instance(self):
+ # Ensure 'exists' notification generates appropriate usage data.
+ instance_id = self._create_instance()
+ instance = objects.Instance.get_by_id(self.context, instance_id,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ # Set some system metadata
+ sys_metadata = {'image_md_key1': 'val1',
+ 'image_md_key2': 'val2',
+ 'other_data': 'meow'}
+ instance.system_metadata.update(sys_metadata)
+ instance.save()
+ self.compute.terminate_instance(self.context, instance, [], [])
+ instance = objects.Instance.get_by_id(
+ self.context.elevated(read_deleted='yes'), instance_id,
+ expected_attrs=['system_metadata'])
+ compute_utils.notify_usage_exists(
+ rpc.get_notifier('compute'), self.context, instance)
+ msg = fake_notifier.NOTIFICATIONS[-1]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.exists')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ for attr in ('display_name', 'created_at', 'launched_at',
+ 'state', 'state_description',
+ 'bandwidth', 'audit_period_beginning',
+ 'audit_period_ending', 'image_meta'):
+ self.assertTrue(attr in payload,
+ msg="Key %s not in payload" % attr)
+ self.assertEqual(payload['image_meta'],
+ {'md_key1': 'val1', 'md_key2': 'val2'})
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ def test_notify_usage_exists_instance_not_found(self):
+ # Ensure 'exists' notification generates appropriate usage data.
+ instance_id = self._create_instance()
+ instance = objects.Instance.get_by_id(self.context, instance_id,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ self.compute.terminate_instance(self.context, instance, [], [])
+ compute_utils.notify_usage_exists(
+ rpc.get_notifier('compute'), self.context, instance)
+ msg = fake_notifier.NOTIFICATIONS[-1]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.exists')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ for attr in ('display_name', 'created_at', 'launched_at',
+ 'state', 'state_description',
+ 'bandwidth', 'audit_period_beginning',
+ 'audit_period_ending', 'image_meta'):
+ self.assertTrue(attr in payload,
+ msg="Key %s not in payload" % attr)
+ self.assertEqual(payload['image_meta'], {})
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ def test_notify_about_instance_usage(self):
+ instance_id = self._create_instance()
+ instance = objects.Instance.get_by_id(self.context, instance_id,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ # Set some system metadata
+ sys_metadata = {'image_md_key1': 'val1',
+ 'image_md_key2': 'val2',
+ 'other_data': 'meow'}
+ instance.system_metadata.update(sys_metadata)
+ instance.save()
+ extra_usage_info = {'image_name': 'fake_name'}
+ compute_utils.notify_about_instance_usage(
+ rpc.get_notifier('compute'),
+ self.context, instance, 'create.start',
+ extra_usage_info=extra_usage_info)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ for attr in ('display_name', 'created_at', 'launched_at',
+ 'state', 'state_description', 'image_meta'):
+ self.assertTrue(attr in payload,
+ msg="Key %s not in payload" % attr)
+ self.assertEqual(payload['image_meta'],
+ {'md_key1': 'val1', 'md_key2': 'val2'})
+ self.assertEqual(payload['image_name'], 'fake_name')
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_notify_about_aggregate_update_with_id(self):
+ # Set aggregate payload
+ aggregate_payload = {'aggregate_id': 1}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.end",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'aggregate.create.end')
+ payload = msg.payload
+ self.assertEqual(payload['aggregate_id'], 1)
+
+ def test_notify_about_aggregate_update_with_name(self):
+ # Set aggregate payload
+ aggregate_payload = {'name': 'fakegroup'}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.start",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'aggregate.create.start')
+ payload = msg.payload
+ self.assertEqual(payload['name'], 'fakegroup')
+
+ def test_notify_about_aggregate_update_without_name_id(self):
+ # Set empty aggregate payload
+ aggregate_payload = {}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.start",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+
+
+class ComputeGetImageMetadataTestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeGetImageMetadataTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+
+ self.image = {
+ "min_ram": 10,
+ "min_disk": 1,
+ "disk_format": "raw",
+ "container_format": "bare",
+ "properties": {},
+ }
+
+ self.mock_image_api = mock.Mock()
+ self.mock_image_api.get.return_value = self.image
+
+ self.ctx = context.RequestContext('fake', 'fake')
+
+ sys_meta = {
+ 'image_min_ram': 10,
+ 'image_min_disk': 1,
+ 'image_disk_format': 'raw',
+ 'image_container_format': 'bare',
+ 'instance_type_id': 0,
+ 'instance_type_name': 'm1.fake',
+ 'instance_type_memory_mb': 10,
+ 'instance_type_vcpus': 1,
+ 'instance_type_root_gb': 1,
+ 'instance_type_ephemeral_gb': 1,
+ 'instance_type_flavorid': '0',
+ 'instance_type_swap': 1,
+ 'instance_type_rxtx_factor': 0.0,
+ 'instance_type_vcpu_weight': None,
+ }
+
+ self.instance = fake_instance.fake_db_instance(
+ memory_mb=0, root_gb=0,
+ system_metadata=sys_meta)
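+ # sys_meta mirrors self.image so the tests can fall back to the image
+ # metadata stored in system_metadata when the image service call fails.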
+
+ @property
+ def instance_obj(self):
+ return objects.Instance._from_db_object(
+ self.ctx, objects.Instance(), self.instance,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+
+ def test_get_image_meta(self):
+ image_meta = compute_utils.get_image_metadata(
+ self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
+
+ self.image['properties'] = 'DONTCARE'
+ self.assertThat(self.image, matchers.DictMatches(image_meta))
+
+ def test_get_image_meta_with_image_id_none(self):
+ self.image['properties'] = {'fake_property': 'fake_value'}
+
+ with mock.patch.object(flavors,
+ "extract_flavor") as mock_extract_flavor:
+ with mock.patch.object(utils, "get_system_metadata_from_image"
+ ) as mock_get_sys_metadata:
+ image_meta = compute_utils.get_image_metadata(
+ self.ctx, self.mock_image_api, None, self.instance_obj)
+
+ self.assertEqual(0, self.mock_image_api.get.call_count)
+ self.assertEqual(0, mock_extract_flavor.call_count)
+ self.assertEqual(0, mock_get_sys_metadata.call_count)
+ self.assertNotIn('fake_property', image_meta['properties'])
+
+ # Check that self.mock_image_api.get is called when the image_id is 0,
+ # since 0 is a valid image ID.
+ image_meta = compute_utils.get_image_metadata(self.ctx,
+ self.mock_image_api,
+ 0, self.instance_obj)
+ self.assertEqual(1, self.mock_image_api.get.call_count)
+ self.assertIn('fake_property', image_meta['properties'])
+
+ def _test_get_image_meta_exception(self, error):
+ self.mock_image_api.get.side_effect = error
+
+ image_meta = compute_utils.get_image_metadata(
+ self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
+
+ self.image['properties'] = 'DONTCARE'
+ # NOTE(danms): The trip through system_metadata will stringify things
+ for key in self.image:
+ self.image[key] = str(self.image[key])
+ self.assertThat(self.image, matchers.DictMatches(image_meta))
+
+ def test_get_image_meta_no_image(self):
+ error = exception.ImageNotFound(image_id='fake-image')
+ self._test_get_image_meta_exception(error)
+
+ def test_get_image_meta_not_authorized(self):
+ error = exception.ImageNotAuthorized(image_id='fake-image')
+ self._test_get_image_meta_exception(error)
+
+ def test_get_image_meta_bad_request(self):
+ error = exception.Invalid()
+ self._test_get_image_meta_exception(error)
+
+ def test_get_image_meta_unexpected_exception(self):
+ error = test.TestingException()
+ with testtools.ExpectedException(test.TestingException):
+ self._test_get_image_meta_exception(error)
+
+ def test_get_image_meta_no_image_system_meta(self):
+ for k in self.instance['system_metadata'].keys():
+ if k.startswith('image_'):
+ del self.instance['system_metadata'][k]
+
+ image_meta = compute_utils.get_image_metadata(
+ self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
+
+ self.image['properties'] = 'DONTCARE'
+ self.assertThat(self.image, matchers.DictMatches(image_meta))
+
+ def test_get_image_meta_no_image_no_image_system_meta(self):
+ e = exception.ImageNotFound(image_id='fake-image')
+ self.mock_image_api.get.side_effect = e
+
+ for k in self.instance['system_metadata'].keys():
+ if k.startswith('image_'):
+ del self.instance['system_metadata'][k]
+
+ image_meta = compute_utils.get_image_metadata(
+ self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
+
+ expected = {'properties': 'DONTCARE'}
+ self.assertThat(expected, matchers.DictMatches(image_meta))
+
+
+class ComputeUtilsGetValFromSysMetadata(test.TestCase):
+
+ def test_get_value_from_system_metadata(self):
+ instance = fake_instance.fake_instance_obj('fake-context')
+ system_meta = {'int_val': 1,
+ 'int_string': '2',
+ 'not_int': 'Nope'}
+ instance.system_metadata = system_meta
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'int_val', int, 0)
+ self.assertEqual(1, result)
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'int_string', int, 0)
+ self.assertEqual(2, result)
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'not_int', int, 0)
+ self.assertEqual(0, result)
+
+
+class ComputeUtilsGetNWInfo(test.TestCase):
+ def test_instance_object_none_info_cache(self):
+ inst = fake_instance.fake_instance_obj('fake-context',
+ expected_attrs=['info_cache'])
+ self.assertIsNone(inst.info_cache)
+ result = compute_utils.get_nw_info_for_instance(inst)
+ self.assertEqual(jsonutils.dumps([]), result.json())
+
+ def test_instance_dict_none_info_cache(self):
+ inst = fake_instance.fake_db_instance(info_cache=None)
+ self.assertIsNone(inst['info_cache'])
+ result = compute_utils.get_nw_info_for_instance(inst)
+ self.assertEqual(jsonutils.dumps([]), result.json())
+
+
+class ComputeUtilsGetRebootTypes(test.TestCase):
+ def setUp(self):
+ super(ComputeUtilsGetRebootTypes, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_get_reboot_type_started_soft(self):
+ reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED,
+ power_state.RUNNING)
+ self.assertEqual(reboot_type, 'SOFT')
+
+ def test_get_reboot_type_pending_soft(self):
+ reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING,
+ power_state.RUNNING)
+ self.assertEqual(reboot_type, 'SOFT')
+
+ def test_get_reboot_type_hard(self):
+ reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_get_reboot_not_running_hard(self):
+ reboot_type = compute_utils.get_reboot_type('foo', 'bar')
+ self.assertEqual(reboot_type, 'HARD')
diff --git a/nova/tests/unit/compute/test_compute_xen.py b/nova/tests/unit/compute/test_compute_xen.py
new file mode 100644
index 0000000000..90a81e9d13
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_xen.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for expectations of behaviour from the Xen driver."""
+
+from oslo.config import cfg
+from oslo.utils import importutils
+
+from nova.compute import power_state
+from nova import context
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.tests.unit.compute import eventlet_utils
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ComputeXenTestCase, self).setUp()
+ self.flags(compute_driver='xenapi.XenAPIDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.compute = importutils.import_object(CONF.compute_manager)
+ # execute power syncing synchronously for testing:
+ self.compute._sync_power_pool = eventlet_utils.SyncPool()
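+        # For illustration only: a "sync" pool of this kind is typically a
+        # green pool whose spawn methods invoke the callable immediately, so
+        # the mox expectations below fire inside the test instead of in a
+        # separate greenthread. A rough sketch (not necessarily the actual
+        # eventlet_utils implementation):
+        #
+        #     class SyncPool(eventlet.GreenPool):
+        #         def spawn_n(self, func, *args, **kwargs):
+        #             func(*args, **kwargs)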
+
+ def test_sync_power_states_instance_not_found(self):
+ db_instance = fake_instance.fake_db_instance()
+ ctxt = context.get_admin_context()
+ instance_list = instance_obj._make_instance_list(ctxt,
+ objects.InstanceList(), [db_instance], None)
+ instance = instance_list[0]
+
+ self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+
+ objects.InstanceList.get_by_host(ctxt,
+ self.compute.host, use_slave=True).AndReturn(instance_list)
+ self.compute.driver.get_num_instances().AndReturn(1)
+ vm_utils.lookup(self.compute.driver._session, instance['name'],
+ False).AndReturn(None)
+ self.compute._sync_instance_power_state(ctxt, instance,
+ power_state.NOSTATE)
+
+ self.mox.ReplayAll()
+
+ self.compute._sync_power_states(ctxt)
diff --git a/nova/tests/unit/compute/test_flavors.py b/nova/tests/unit/compute/test_flavors.py
new file mode 100644
index 0000000000..cece4b3f39
--- /dev/null
+++ b/nova/tests/unit/compute/test_flavors.py
@@ -0,0 +1,61 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for flavor basic functions"""
+
+from nova.compute import flavors
+from nova import exception
+from nova import test
+
+
+class ExtraSpecTestCase(test.NoDBTestCase):
+ def _flavor_validate_extra_spec_keys_invalid_input(self, key_name_list):
+ self.assertRaises(exception.InvalidInput,
+ flavors.validate_extra_spec_keys, key_name_list)
+
+ def test_flavor_validate_extra_spec_keys_invalid_input(self):
+ lists = [['', ], ['*', ], ['+', ]]
+ for x in lists:
+ self._flavor_validate_extra_spec_keys_invalid_input(x)
+
+ def test_flavor_validate_extra_spec_keys(self):
+ key_name_list = ['abc', 'ab c', 'a-b-c', 'a_b-c', 'a:bc']
+ flavors.validate_extra_spec_keys(key_name_list)
+
+
+class CreateFlavorTestCase(test.TestCase):
+ def test_create_flavor_ram_error(self):
+ args = ("ram_test", "9999999999", "1", "10", "1")
+ try:
+ flavors.create(*args)
+            self.fail("flavors.create() should have raised InvalidInput.")
+ except exception.InvalidInput as e:
+ self.assertIn("ram", e.message)
+
+ def test_create_flavor_disk_error(self):
+ args = ("disk_test", "1024", "1", "9999999999", "1")
+ try:
+ flavors.create(*args)
+            self.fail("flavors.create() should have raised InvalidInput.")
+ except exception.InvalidInput as e:
+ self.assertIn("disk", e.message)
+
+ def test_create_flavor_ephemeral_error(self):
+ args = ("ephemeral_test", "1024", "1", "10", "9999999999")
+ try:
+ flavors.create(*args)
+            self.fail("flavors.create() should have raised InvalidInput.")
+ except exception.InvalidInput as e:
+ self.assertIn("ephemeral", e.message)
diff --git a/nova/tests/unit/compute/test_host_api.py b/nova/tests/unit/compute/test_host_api.py
new file mode 100644
index 0000000000..348d2dea3d
--- /dev/null
+++ b/nova/tests/unit/compute/test_host_api.py
@@ -0,0 +1,480 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova.cells import utils as cells_utils
+from nova import compute
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_service
+
+
+class ComputeHostAPITestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeHostAPITestCase, self).setUp()
+ self.host_api = compute.HostAPI()
+ self.ctxt = context.get_admin_context()
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ def _compare_obj(self, obj, db_obj):
+ test_objects.compare_obj(self, obj, db_obj,
+ allow_missing=test_service.OPTIONAL)
+
+ def _compare_objs(self, obj_list, db_obj_list):
+ for index, obj in enumerate(obj_list):
+ self._compare_obj(obj, db_obj_list[index])
+
+ def _mock_rpc_call(self, method, **kwargs):
+ self.mox.StubOutWithMock(self.host_api.rpcapi, method)
+ getattr(self.host_api.rpcapi, method)(
+ self.ctxt, **kwargs).AndReturn('fake-result')
+
+ def _mock_assert_host_exists(self):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists.
+ """
+ def fake_assert_host_exists(context, host_name, must_be_up=False):
+ return 'fake_host'
+ self.stubs.Set(self.host_api, '_assert_host_exists',
+ fake_assert_host_exists)
+
+ def test_set_host_enabled(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call('set_host_enabled',
+ host='fake_host',
+ enabled='fake_enabled')
+ self.mox.ReplayAll()
+ fake_notifier.NOTIFICATIONS = []
+ result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
+ 'fake_enabled')
+ self.assertEqual('fake-result', result)
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('HostAPI.set_enabled.start', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_enabled', msg.payload['enabled'])
+ self.assertEqual('fake_host', msg.payload['host_name'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('HostAPI.set_enabled.end', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_enabled', msg.payload['enabled'])
+ self.assertEqual('fake_host', msg.payload['host_name'])
+
+ def test_host_name_from_assert_hosts_exists(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call('set_host_enabled',
+ host='fake_host',
+ enabled='fake_enabled')
+ self.mox.ReplayAll()
+ result = self.host_api.set_host_enabled(self.ctxt, 'fake_hosT',
+ 'fake_enabled')
+ self.assertEqual('fake-result', result)
+
+ def test_get_host_uptime(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call('get_host_uptime',
+ host='fake_host')
+ self.mox.ReplayAll()
+ result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
+ self.assertEqual('fake-result', result)
+
+ def test_get_host_uptime_service_down(self):
+ def fake_service_get_by_compute_host(context, host_name):
+ return dict(test_service.fake_service, id=1)
+ self.stubs.Set(self.host_api.db, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+
+ def fake_service_is_up(service):
+ return False
+ self.stubs.Set(self.host_api.servicegroup_api,
+ 'service_is_up', fake_service_is_up)
+
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.host_api.get_host_uptime, self.ctxt,
+ 'fake_host')
+
+ def test_host_power_action(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call('host_power_action',
+ host='fake_host',
+ action='fake_action')
+ self.mox.ReplayAll()
+ fake_notifier.NOTIFICATIONS = []
+ result = self.host_api.host_power_action(self.ctxt, 'fake_host',
+ 'fake_action')
+ self.assertEqual('fake-result', result)
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('HostAPI.power_action.start', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_action', msg.payload['action'])
+ self.assertEqual('fake_host', msg.payload['host_name'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('HostAPI.power_action.end', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_action', msg.payload['action'])
+ self.assertEqual('fake_host', msg.payload['host_name'])
+
+ def test_set_host_maintenance(self):
+ self._mock_assert_host_exists()
+ self._mock_rpc_call('host_maintenance_mode',
+ host='fake_host',
+ host_param='fake_host',
+ mode='fake_mode')
+ self.mox.ReplayAll()
+ fake_notifier.NOTIFICATIONS = []
+ result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
+ 'fake_mode')
+ self.assertEqual('fake-result', result)
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('HostAPI.set_maintenance.start', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_host', msg.payload['host_name'])
+ self.assertEqual('fake_mode', msg.payload['mode'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('HostAPI.set_maintenance.end', msg.event_type)
+ self.assertEqual('api.fake_host', msg.publisher_id)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake_host', msg.payload['host_name'])
+ self.assertEqual('fake_mode', msg.payload['mode'])
+
+ def test_service_get_all_no_zones(self):
+ services = [dict(test_service.fake_service,
+ id=1, topic='compute', host='host1'),
+ dict(test_service.fake_service,
+ topic='compute', host='host2')]
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt)
+ self.mox.VerifyAll()
+ self._compare_objs(result, services)
+
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={})
+ self.mox.VerifyAll()
+ self._compare_objs(result, services)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(host='host2'))
+ self.mox.VerifyAll()
+ self._compare_objs(result, [services[1]])
+
+ def test_service_get_all(self):
+ services = [dict(test_service.fake_service,
+ topic='compute', host='host1'),
+ dict(test_service.fake_service,
+ topic='compute', host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, set_zones=True)
+ self.mox.VerifyAll()
+ self._compare_objs(result, exp_services)
+
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={},
+ set_zones=True)
+ self.mox.VerifyAll()
+ self._compare_objs(result, exp_services)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(host='host2'),
+ set_zones=True)
+ self.mox.VerifyAll()
+ self._compare_objs(result, [exp_services[1]])
+
+ # Test w/ zone filter but no set_zones arg.
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ filters = {'availability_zone': 'nova'}
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=filters)
+ self.mox.VerifyAll()
+ self._compare_objs(result, exp_services)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_by_compute_host')
+
+ self.host_api.db.service_get_by_compute_host(self.ctxt,
+ 'fake-host').AndReturn(test_service.fake_service)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_by_compute_host(self.ctxt,
+ 'fake-host')
+ self.assertEqual(test_service.fake_service['id'], result.id)
+
+ def test_service_update(self):
+ host_name = 'fake-host'
+ binary = 'nova-compute'
+ params_to_update = dict(disabled=True)
+ service_id = 42
+ expected_result = dict(test_service.fake_service, id=service_id)
+
+ self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_args')
+ self.host_api.db.service_get_by_args(self.ctxt,
+ host_name, binary).AndReturn(expected_result)
+
+ self.mox.StubOutWithMock(self.host_api.db, 'service_update')
+ self.host_api.db.service_update(
+ self.ctxt, service_id, params_to_update).AndReturn(expected_result)
+
+ self.mox.ReplayAll()
+
+ result = self.host_api.service_update(
+ self.ctxt, host_name, binary, params_to_update)
+ self._compare_obj(result, expected_result)
+
+ def test_instance_get_all_by_host(self):
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'instance_get_all_by_host')
+
+ self.host_api.db.instance_get_all_by_host(self.ctxt,
+ 'fake-host').AndReturn(['fake-responses'])
+ self.mox.ReplayAll()
+ result = self.host_api.instance_get_all_by_host(self.ctxt,
+ 'fake-host')
+ self.assertEqual(['fake-responses'], result)
+
+ def test_task_log_get_all(self):
+ self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all')
+
+ self.host_api.db.task_log_get_all(self.ctxt,
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
+ 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state')
+ self.assertEqual('fake-response', result)
+
+ def test_service_delete(self):
+ with contextlib.nested(
+ mock.patch.object(objects.Service, 'get_by_id',
+ return_value=objects.Service()),
+ mock.patch.object(objects.Service, 'destroy')
+ ) as (
+ get_by_id, destroy
+ ):
+ self.host_api.service_delete(self.ctxt, 1)
+ get_by_id.assert_called_once_with(self.ctxt, 1)
+ destroy.assert_called_once_with()
+
+
+class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
+ def setUp(self):
+ self.flags(enable=True, group='cells')
+ self.flags(cell_type='api', group='cells')
+ super(ComputeHostAPICellsTestCase, self).setUp()
+
+ def _mock_rpc_call(self, method, **kwargs):
+ if 'host_param' in kwargs:
+ kwargs.pop('host_param')
+ else:
+ kwargs.pop('host')
+ rpc_message = {
+ 'method': method,
+ 'namespace': None,
+ 'args': kwargs,
+ 'version': self.host_api.rpcapi.client.target.version,
+ }
+ cells_rpcapi = self.host_api.rpcapi.client.cells_rpcapi
+ self.mox.StubOutWithMock(cells_rpcapi, 'proxy_rpc_to_manager')
+ cells_rpcapi.proxy_rpc_to_manager(self.ctxt,
+ rpc_message,
+ 'compute.fake_host',
+ call=True).AndReturn('fake-result')
+
+ def test_service_get_all_no_zones(self):
+ services = [dict(test_service.fake_service,
+ id='cell1@1', topic='compute', host='host1'),
+ dict(test_service.fake_service,
+ id='cell1@2', topic='compute', host='host2')]
+ exp_services = [s.copy() for s in services]
+
+ fake_filters = {'host': 'host1'}
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_all')
+ self.host_api.cells_rpcapi.service_get_all(self.ctxt,
+ filters=fake_filters).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=fake_filters)
+ self._compare_objs(result, exp_services)
+
+ def _test_service_get_all(self, fake_filters, **kwargs):
+ services = [dict(test_service.fake_service,
+ id='cell1@1', key1='val1', key2='val2',
+ topic='compute', host='host1'),
+ dict(test_service.fake_service,
+ id='cell1@2', key1='val2', key3='val3',
+ topic='compute', host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_all')
+ self.host_api.cells_rpcapi.service_get_all(self.ctxt,
+ filters=fake_filters).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=fake_filters,
+ **kwargs)
+ self.mox.VerifyAll()
+ self._compare_objs(result, exp_services)
+
+ def test_service_get_all(self):
+ fake_filters = {'availability_zone': 'nova'}
+ self._test_service_get_all(fake_filters)
+
+ def test_service_get_all_set_zones(self):
+ fake_filters = {'key1': 'val1'}
+ self._test_service_get_all(fake_filters, set_zones=True)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_by_compute_host')
+
+ self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt,
+ 'fake-host').AndReturn(test_service.fake_service)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_by_compute_host(self.ctxt,
+ 'fake-host')
+ self._compare_obj(result, test_service.fake_service)
+
+ def test_service_update(self):
+ host_name = 'fake-host'
+ binary = 'nova-compute'
+ params_to_update = dict(disabled=True)
+ service_id = 42
+ expected_result = dict(test_service.fake_service, id=service_id)
+
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_update')
+ self.host_api.cells_rpcapi.service_update(
+ self.ctxt, host_name,
+ binary, params_to_update).AndReturn(expected_result)
+
+ self.mox.ReplayAll()
+
+ result = self.host_api.service_update(
+ self.ctxt, host_name, binary, params_to_update)
+ self._compare_obj(result, expected_result)
+
+ def test_service_delete(self):
+ cell_service_id = cells_utils.cell_with_item('cell1', 1)
+ with mock.patch.object(self.host_api.cells_rpcapi,
+ 'service_delete') as service_delete:
+ self.host_api.service_delete(self.ctxt, cell_service_id)
+ service_delete.assert_called_once_with(
+ self.ctxt, cell_service_id)
+
+ def test_instance_get_all_by_host(self):
+ instances = [dict(id=1, cell_name='cell1', host='host1'),
+ dict(id=2, cell_name='cell2', host='host1'),
+ dict(id=3, cell_name='cell1', host='host2')]
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'instance_get_all_by_host')
+
+ self.host_api.db.instance_get_all_by_host(self.ctxt,
+ 'fake-host').AndReturn(instances)
+ self.mox.ReplayAll()
+ expected_result = [instances[0], instances[2]]
+ cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
+ result = self.host_api.instance_get_all_by_host(self.ctxt,
+ cell_and_host)
+ self.assertEqual(expected_result, result)
+
+ def test_task_log_get_all(self):
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'task_log_get_all')
+
+ self.host_api.cells_rpcapi.task_log_get_all(self.ctxt,
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
+ 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state')
+ self.assertEqual('fake-response', result)
+
+ def test_get_host_uptime_service_down(self):
+        # The corresponding ComputeHostAPITestCase test relies on
+        # _assert_host_exists, which is a no-op in the cells API.
+ pass
+
+ def test_get_host_uptime(self):
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'get_host_uptime')
+
+        self.host_api.cells_rpcapi.get_host_uptime(
+            self.ctxt, 'fake-host').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.get_host_uptime(self.ctxt, 'fake-host')
+ self.assertEqual('fake-response', result)
diff --git a/nova/tests/unit/compute/test_hvtype.py b/nova/tests/unit/compute/test_hvtype.py
new file mode 100644
index 0000000000..93cb245e10
--- /dev/null
+++ b/nova/tests/unit/compute/test_hvtype.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import hvtype
+from nova import exception
+from nova import test
+
+
+class HvTypeTest(test.NoDBTestCase):
+
+ def test_valid_string(self):
+ self.assertTrue(hvtype.is_valid("vmware"))
+
+ def test_valid_constant(self):
+ self.assertTrue(hvtype.is_valid(hvtype.QEMU))
+
+ def test_valid_bogus(self):
+ self.assertFalse(hvtype.is_valid("acmehypervisor"))
+
+ def test_canonicalize_none(self):
+ self.assertIsNone(hvtype.canonicalize(None))
+
+ def test_canonicalize_case(self):
+ self.assertEqual(hvtype.QEMU, hvtype.canonicalize("QeMu"))
+
+ def test_canonicalize_xapi(self):
+ self.assertEqual(hvtype.XEN, hvtype.canonicalize("xapi"))
+
+ def test_canonicalize_powervm(self):
+ self.assertEqual(hvtype.PHYP, hvtype.canonicalize("POWERVM"))
+
+ def test_canonicalize_invalid(self):
+ self.assertRaises(exception.InvalidHypervisorVirtType,
+ hvtype.canonicalize,
+ "wibble")
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
new file mode 100644
index 0000000000..ecdbcff103
--- /dev/null
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -0,0 +1,221 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for keypair API."""
+
+from oslo.config import cfg
+import six
+
+from nova.compute import api as compute_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import quota
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_keypair
+
+CONF = cfg.CONF
+QUOTAS = quota.QUOTAS
+
+
+class KeypairAPITestCase(test_compute.BaseTestCase):
+ def setUp(self):
+ super(KeypairAPITestCase, self).setUp()
+ self.keypair_api = compute_api.KeypairAPI()
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._keypair_db_call_stubs()
+ self.existing_key_name = 'fake existing key name'
+ self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
+ '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
+ 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
+ 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu'
+ 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8'
+ 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK'
+ 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU'
+ 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz')
+ self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a'
+ self.key_destroyed = False
+
+ def _keypair_db_call_stubs(self):
+
+ def db_key_pair_get_all_by_user(context, user_id):
+ return [dict(test_keypair.fake_keypair,
+ name=self.existing_key_name,
+ public_key=self.pub_key,
+ fingerprint=self.fingerprint)]
+
+ def db_key_pair_create(context, keypair):
+ return dict(test_keypair.fake_keypair, **keypair)
+
+ def db_key_pair_destroy(context, user_id, name):
+ if name == self.existing_key_name:
+ self.key_destroyed = True
+
+ def db_key_pair_get(context, user_id, name):
+ if name == self.existing_key_name and not self.key_destroyed:
+ return dict(test_keypair.fake_keypair,
+ name=self.existing_key_name,
+ public_key=self.pub_key,
+ fingerprint=self.fingerprint)
+ else:
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stubs.Set(db, "key_pair_create",
+ db_key_pair_create)
+ self.stubs.Set(db, "key_pair_destroy",
+ db_key_pair_destroy)
+ self.stubs.Set(db, "key_pair_get",
+ db_key_pair_get)
+
+ def _check_notifications(self, action='create', key_name='foo'):
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+
+ n1 = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('INFO', n1.priority)
+ self.assertEqual('keypair.%s.start' % action, n1.event_type)
+ self.assertEqual('api.%s' % CONF.host, n1.publisher_id)
+ self.assertEqual('fake', n1.payload['user_id'])
+ self.assertEqual('fake', n1.payload['tenant_id'])
+ self.assertEqual(key_name, n1.payload['key_name'])
+
+ n2 = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('INFO', n2.priority)
+ self.assertEqual('keypair.%s.end' % action, n2.event_type)
+ self.assertEqual('api.%s' % CONF.host, n2.publisher_id)
+ self.assertEqual('fake', n2.payload['user_id'])
+ self.assertEqual('fake', n2.payload['tenant_id'])
+ self.assertEqual(key_name, n2.payload['key_name'])
+
+
+class CreateImportSharedTestMixIn(object):
+ """Tests shared between create and import_key.
+
+    The mix-in pattern is used here so that these `test_*` methods are
+    only picked up by the test runner when they are part of a 'concrete'
+    test case.
+ """
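+    # Roughly, the pattern looks like this (names in the sketch are
+    # hypothetical, for illustration only):
+    #
+    #     class SharedChecksMixIn(object):      # deliberately no TestCase base
+    #         def test_shared_behaviour(self):
+    #             self.run_subject()            # supplied by the concrete case
+    #
+    #     class ConcreteTestCase(KeypairAPITestCase, SharedChecksMixIn):
+    #         def run_subject(self):
+    #             ...
+    #
+    # CreateKeypairTestCase and ImportKeypairTestCase below are the concrete
+    # cases for this mix-in; they supply ``func_name``.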
+
+ def assertKeyNameRaises(self, exc_class, expected_message, name):
+ func = getattr(self.keypair_api, self.func_name)
+
+ args = []
+ if self.func_name == 'import_key_pair':
+ args.append(self.pub_key)
+
+ exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
+ name, *args)
+ self.assertEqual(expected_message, six.text_type(exc))
+
+ def assertInvalidKeypair(self, expected_message, name):
+ msg = _('Keypair data is invalid: %s') % expected_message
+ self.assertKeyNameRaises(exception.InvalidKeypair, msg, name)
+
+ def test_name_too_short(self):
+ msg = _('Keypair name must be string and between 1 '
+ 'and 255 characters long')
+ self.assertInvalidKeypair(msg, '')
+
+ def test_name_too_long(self):
+ msg = _('Keypair name must be string and between 1 '
+ 'and 255 characters long')
+ self.assertInvalidKeypair(msg, 'x' * 256)
+
+ def test_invalid_chars(self):
+ msg = _("Keypair name contains unsafe characters")
+ self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
+
+ def test_already_exists(self):
+ def db_key_pair_create_duplicate(context, keypair):
+ raise exception.KeyPairExists(key_name=keypair.get('name', ''))
+
+ self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+
+ msg = (_("Key pair '%(key_name)s' already exists.") %
+ {'key_name': self.existing_key_name})
+ self.assertKeyNameRaises(exception.KeyPairExists, msg,
+ self.existing_key_name)
+
+ def test_quota_limit(self):
+ def fake_quotas_count(self, context, resource, *args, **kwargs):
+ return CONF.quota_key_pairs
+
+ self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+ msg = _("Maximum number of key pairs exceeded")
+ self.assertKeyNameRaises(exception.KeypairLimitExceeded, msg, 'foo')
+
+
+class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
+ func_name = 'create_key_pair'
+
+ def test_success(self):
+ keypair, private_key = self.keypair_api.create_key_pair(
+ self.ctxt, self.ctxt.user_id, 'foo')
+ self.assertEqual('foo', keypair['name'])
+ self._check_notifications()
+
+
+class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
+ func_name = 'import_key_pair'
+
+ def test_success(self):
+ keypair = self.keypair_api.import_key_pair(self.ctxt,
+ self.ctxt.user_id,
+ 'foo',
+ self.pub_key)
+
+ self.assertEqual('foo', keypair['name'])
+ self.assertEqual(self.fingerprint, keypair['fingerprint'])
+ self.assertEqual(self.pub_key, keypair['public_key'])
+ self._check_notifications(action='import')
+
+ def test_bad_key_data(self):
+ exc = self.assertRaises(exception.InvalidKeypair,
+ self.keypair_api.import_key_pair,
+ self.ctxt, self.ctxt.user_id, 'foo',
+ 'bad key data')
+ msg = u'Keypair data is invalid: failed to generate fingerprint'
+ self.assertEqual(msg, six.text_type(exc))
+
+
+class GetKeypairTestCase(KeypairAPITestCase):
+ def test_success(self):
+ keypair = self.keypair_api.get_key_pair(self.ctxt,
+ self.ctxt.user_id,
+ self.existing_key_name)
+ self.assertEqual(self.existing_key_name, keypair['name'])
+
+
+class GetKeypairsTestCase(KeypairAPITestCase):
+ def test_success(self):
+ keypairs = self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id)
+ self.assertEqual([self.existing_key_name],
+ [k['name'] for k in keypairs])
+
+
+class DeleteKeypairTestCase(KeypairAPITestCase):
+ def test_success(self):
+ self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
+ self.existing_key_name)
+ self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id,
+ self.existing_key_name)
+ self.assertRaises(exception.KeypairNotFound,
+ self.keypair_api.get_key_pair, self.ctxt, self.ctxt.user_id,
+ self.existing_key_name)
+
+ self._check_notifications(action='delete',
+ key_name=self.existing_key_name)
diff --git a/nova/tests/unit/compute/test_multiple_nodes.py b/nova/tests/unit/compute/test_multiple_nodes.py
new file mode 100644
index 0000000000..7362534b44
--- /dev/null
+++ b/nova/tests/unit/compute/test_multiple_nodes.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service with multiple compute nodes."""
+
+from oslo.config import cfg
+from oslo.utils import importutils
+
+from nova import context
+from nova import db
+from nova import objects
+from nova import test
+from nova.virt import fake
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class BaseTestCase(test.TestCase):
+ def tearDown(self):
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+
+class FakeDriverSingleNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverSingleNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['xyz'])
+
+ def test_get_available_resource(self):
+ res = self.driver.get_available_resource('xyz')
+ self.assertEqual(res['hypervisor_hostname'], 'xyz')
+
+
+class FakeDriverMultiNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverMultiNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['aaa', 'bbb'])
+
+ def test_get_available_resource(self):
+ res_a = self.driver.get_available_resource('aaa')
+ self.assertEqual(res_a['hypervisor_hostname'], 'aaa')
+
+ res_b = self.driver.get_available_resource('bbb')
+ self.assertEqual(res_b['hypervisor_hostname'], 'bbb')
+
+ res_x = self.driver.get_available_resource('xxx')
+ self.assertEqual(res_x, {})
+
+
+class MultiNodeComputeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MultiNodeComputeTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.flags(use_local=True, group='conductor')
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+
+ def fake_get_compute_nodes_in_db(context, use_slave=False):
+ fake_compute_nodes = [{'local_gb': 259,
+ 'vcpus_used': 0,
+ 'deleted': 0,
+ 'hypervisor_type': 'powervm',
+ 'created_at': '2013-04-01T00:27:06.000000',
+ 'local_gb_used': 0,
+ 'updated_at': '2013-04-03T00:35:41.000000',
+ 'hypervisor_hostname': 'fake_phyp1',
+ 'memory_mb_used': 512,
+ 'memory_mb': 131072,
+ 'current_workload': 0,
+ 'vcpus': 16,
+ 'cpu_info': 'ppc64,powervm,3940',
+ 'running_vms': 0,
+ 'free_disk_gb': 259,
+ 'service_id': 7,
+ 'hypervisor_version': 7,
+ 'disk_available_least': 265856,
+ 'deleted_at': None,
+ 'free_ram_mb': 130560,
+ 'metrics': '',
+ 'numa_topology': '',
+ 'stats': '',
+ 'id': 2,
+ 'host_ip': '127.0.0.1'}]
+ return [objects.ComputeNode._from_db_object(
+ context, objects.ComputeNode(), cn)
+ for cn in fake_compute_nodes]
+
+ def fake_compute_node_delete(context, compute_node_id):
+ self.assertEqual(2, compute_node_id)
+
+ self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
+ fake_get_compute_nodes_in_db)
+ self.stubs.Set(db, 'compute_node_delete',
+ fake_compute_node_delete)
+
+ def test_update_available_resource_add_remove_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ fake.set_nodes(['A', 'B'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ def test_compute_manager_removes_deleted_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B'])
+
+ fake_compute_nodes = [
+ objects.ComputeNode(
+ context=ctx, hypervisor_hostname='A', id=2),
+ objects.ComputeNode(
+ context=ctx, hypervisor_hostname='B', id=3),
+ ]
+
+ def fake_get_compute_nodes_in_db(context, use_slave=False):
+ return fake_compute_nodes
+
+ def fake_compute_node_delete(context, compute_node_id):
+ for cn in fake_compute_nodes:
+ if compute_node_id == cn.id:
+ fake_compute_nodes.remove(cn)
+ return
+
+ self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
+ fake_get_compute_nodes_in_db)
+ self.stubs.Set(db, 'compute_node_delete',
+ fake_compute_node_delete)
+
+ self.compute.update_available_resource(ctx)
+
+ # Verify nothing is deleted if driver and db compute nodes match
+ self.assertEqual(len(fake_compute_nodes), 2)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A'])
+ self.compute.update_available_resource(ctx)
+
+ # Verify B gets deleted since now only A is reported by driver
+ self.assertEqual(len(fake_compute_nodes), 1)
+ self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A'])
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
new file mode 100644
index 0000000000..e646fb19ad
--- /dev/null
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -0,0 +1,1539 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for compute resource tracking."""
+
+import uuid
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.compute import flavors
+from nova.compute import resource_tracker
+from nova.compute import resources
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import objects
+from nova.objects import base as obj_base
+from nova import rpc
+from nova import test
+from nova.tests.unit.compute.monitors import test_monitors
+from nova.tests.unit.objects import test_migration
+from nova.tests.unit.pci import fakes as pci_fakes
+from nova.virt import driver
+from nova.virt import hardware
+
+
+FAKE_VIRT_MEMORY_MB = 5
+FAKE_VIRT_MEMORY_OVERHEAD = 1
+FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
+ FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
+FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
+ hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
+FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 0, set([1, 2]), 3072, 4, 10240),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, set([3, 4]), 3072, 4, 10240)])
+ROOT_GB = 5
+EPHEMERAL_GB = 1
+FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
+FAKE_VIRT_VCPUS = 1
+FAKE_VIRT_STATS = {'virt_stat': 10}
+FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
+RESOURCE_NAMES = ['vcpu']
+CONF = cfg.CONF
+
+
+class UnsupportedVirtDriver(driver.ComputeDriver):
+    """Fake driver that does not support reporting resource usage."""
+
+ def __init__(self):
+ super(UnsupportedVirtDriver, self).__init__(None)
+
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
+ # no support for getting resource usage info
+ return {}
+
+
+class FakeVirtDriver(driver.ComputeDriver):
+
+ def __init__(self, pci_support=False, stats=None,
+ numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
+ super(FakeVirtDriver, self).__init__(None)
+ self.memory_mb = FAKE_VIRT_MEMORY_MB
+ self.local_gb = FAKE_VIRT_LOCAL_GB
+ self.vcpus = FAKE_VIRT_VCPUS
+ self.numa_topology = numa_topology
+
+ self.memory_mb_used = 0
+ self.local_gb_used = 0
+ self.pci_support = pci_support
+ self.pci_devices = [{
+ 'label': 'forza-napoli',
+ 'dev_type': 'foo',
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'product_id': 'p1',
+ 'vendor_id': 'v1',
+ 'status': 'available',
+ 'extra_k1': 'v1'}] if self.pci_support else []
+ self.pci_stats = [{
+ 'count': 1,
+ 'vendor_id': 'v1',
+ 'product_id': 'p1'}] if self.pci_support else []
+ if stats is not None:
+ self.stats = stats
+
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
+ d = {
+ 'vcpus': self.vcpus,
+ 'memory_mb': self.memory_mb,
+ 'local_gb': self.local_gb,
+ 'vcpus_used': 0,
+ 'memory_mb_used': self.memory_mb_used,
+ 'local_gb_used': self.local_gb_used,
+ 'hypervisor_type': 'fake',
+ 'hypervisor_version': 0,
+ 'hypervisor_hostname': 'fakehost',
+ 'cpu_info': '',
+ 'numa_topology': (
+ self.numa_topology.to_json() if self.numa_topology else None),
+ }
+ if self.pci_support:
+ d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
+ if hasattr(self, 'stats'):
+ d['stats'] = self.stats
+ return d
+
+ def estimate_instance_overhead(self, instance_info):
+ instance_info['memory_mb'] # make sure memory value is present
+ overhead = {
+ 'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
+ }
+ return overhead # just return a constant value for testing
+
+
+class BaseTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+
+ self.context = context.get_admin_context()
+
+ self.flags(use_local=True, group='conductor')
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+
+ self._instances = {}
+ self._numa_topologies = {}
+ self._instance_types = {}
+
+ self.stubs.Set(self.conductor.db,
+ 'instance_get_all_by_host_and_node',
+ self._fake_instance_get_all_by_host_and_node)
+ self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
+ self._fake_instance_extra_get_by_instance_uuid)
+ self.stubs.Set(self.conductor.db,
+ 'instance_update_and_get_original',
+ self._fake_instance_update_and_get_original)
+ self.stubs.Set(self.conductor.db,
+ 'flavor_get', self._fake_flavor_get)
+
+ self.host = 'fakehost'
+
+ def _create_compute_node(self, values=None):
+ compute = {
+ "id": 1,
+ "service_id": 1,
+ "vcpus": 1,
+ "memory_mb": 1,
+ "local_gb": 1,
+ "vcpus_used": 1,
+ "memory_mb_used": 1,
+ "local_gb_used": 1,
+ "free_ram_mb": 1,
+ "free_disk_gb": 1,
+ "current_workload": 1,
+ "running_vms": 0,
+ "cpu_info": None,
+ "numa_topology": None,
+ "stats": {
+ "num_instances": "1",
+ },
+ "hypervisor_hostname": "fakenode",
+ }
+ if values:
+ compute.update(values)
+ return compute
+
+ def _create_service(self, host="fakehost", compute=None):
+ if compute:
+ compute = [compute]
+
+ service = {
+ "id": 1,
+ "host": host,
+ "binary": "nova-compute",
+ "topic": "compute",
+ "compute_node": compute,
+ }
+ return service
+
+ def _fake_instance_system_metadata(self, instance_type, prefix=''):
+ sys_meta = []
+ for key in flavors.system_metadata_flavor_props.keys():
+ sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
+ 'value': instance_type[key]})
+ return sys_meta
+
+ def _fake_instance(self, stash=True, flavor=None, **kwargs):
+
+ # Default to an instance ready to resize to or from the same
+ # instance_type
+ flavor = flavor or self._fake_flavor_create()
+ sys_meta = self._fake_instance_system_metadata(flavor)
+
+ if stash:
+ # stash instance types in system metadata.
+ sys_meta = (sys_meta +
+ self._fake_instance_system_metadata(flavor, 'new_') +
+ self._fake_instance_system_metadata(flavor, 'old_'))
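+        # With the prefixes above, each flavor property is stashed three
+        # times in system_metadata, e.g. instance_type_memory_mb,
+        # new_instance_type_memory_mb and old_instance_type_memory_mb (see
+        # _fake_instance_system_metadata for the key format).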
+
+ instance_uuid = str(uuid.uuid1())
+ instance = {
+ 'uuid': instance_uuid,
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None,
+ 'ephemeral_key_uuid': None,
+ 'os_type': 'Linux',
+ 'project_id': '123456',
+ 'host': None,
+ 'node': None,
+ 'instance_type_id': flavor['id'],
+ 'memory_mb': flavor['memory_mb'],
+ 'vcpus': flavor['vcpus'],
+ 'root_gb': flavor['root_gb'],
+ 'ephemeral_gb': flavor['ephemeral_gb'],
+ 'launched_on': None,
+ 'system_metadata': sys_meta,
+ 'availability_zone': None,
+ 'vm_mode': None,
+ 'reservation_id': None,
+ 'display_name': None,
+ 'default_swap_device': None,
+ 'power_state': None,
+ 'scheduled_at': None,
+ 'access_ip_v6': None,
+ 'access_ip_v4': None,
+ 'key_name': None,
+ 'updated_at': None,
+ 'cell_name': None,
+ 'locked': None,
+ 'locked_by': None,
+ 'launch_index': None,
+ 'architecture': None,
+ 'auto_disk_config': None,
+ 'terminated_at': None,
+ 'ramdisk_id': None,
+ 'user_data': None,
+ 'cleaned': None,
+ 'deleted_at': None,
+ 'id': 333,
+ 'disable_terminate': None,
+ 'hostname': None,
+ 'display_description': None,
+ 'key_data': None,
+ 'deleted': None,
+ 'default_ephemeral_device': None,
+ 'progress': None,
+ 'launched_at': None,
+ 'config_drive': None,
+ 'kernel_id': None,
+ 'user_id': None,
+ 'shutdown_terminate': None,
+ 'created_at': None,
+ 'image_ref': None,
+ 'root_device_name': None,
+ }
+ numa_topology = kwargs.pop('numa_topology', None)
+ if numa_topology:
+ numa_topology = {
+ 'id': 1, 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': None,
+ 'instance_uuid': instance['uuid'],
+ 'numa_topology': numa_topology.to_json()
+ }
+ instance.update(kwargs)
+
+ self._instances[instance_uuid] = instance
+ self._numa_topologies[instance_uuid] = numa_topology
+ return instance
+
+ def _fake_flavor_create(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'disabled': False,
+ 'is_public': True,
+ 'name': 'fakeitype',
+ 'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'vcpus': FAKE_VIRT_VCPUS,
+ 'root_gb': ROOT_GB,
+ 'ephemeral_gb': EPHEMERAL_GB,
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ 'flavorid': 'fakeflavor',
+ 'extra_specs': {},
+ }
+ instance_type.update(**kwargs)
+
+ id_ = instance_type['id']
+ self._instance_types[id_] = instance_type
+ return instance_type
+
+ def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
+ return [i for i in self._instances.values() if i['host'] == host]
+
+ def _fake_instance_extra_get_by_instance_uuid(self, context,
+ instance_uuid, columns=None):
+ return self._numa_topologies.get(instance_uuid)
+
+ def _fake_flavor_get(self, ctxt, id_):
+ return self._instance_types[id_]
+
+ def _fake_instance_update_and_get_original(self, context, instance_uuid,
+ values):
+ instance = self._instances[instance_uuid]
+ instance.update(values)
+        # The tests don't care what the original instance values are; the
+        # original is only used in the subsequent notification:
+ return (instance, instance)
+
+ def _driver(self):
+ return FakeVirtDriver()
+
+ def _tracker(self, host=None):
+
+ if host is None:
+ host = self.host
+
+ node = "fakenode"
+
+ driver = self._driver()
+
+ tracker = resource_tracker.ResourceTracker(host, driver, node)
+ tracker.ext_resources_handler = \
+ resources.ResourceHandler(RESOURCE_NAMES, True)
+ return tracker
+
+
+class UnsupportedDriverTestCase(BaseTestCase):
+ """Resource tracking should be disabled when the virt driver doesn't
+ support it.
+ """
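+    # The driver above advertises "no support" by returning an empty dict
+    # from get_available_resource(); the tests below assume the tracker
+    # reacts roughly like this (an illustrative sketch, not Nova's actual
+    # tracker code):
+    #
+    #     resources = self.driver.get_available_resource(self.nodename)
+    #     if not resources:
+    #         # nothing to track: stay disabled and keep no compute node
+    #         self.compute_node = None
+    #         return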
+ def setUp(self):
+ super(UnsupportedDriverTestCase, self).setUp()
+ self.tracker = self._tracker()
+ # seed tracker with data:
+ self.tracker.update_available_resource(self.context)
+
+ def _driver(self):
+ return UnsupportedVirtDriver()
+
+ def test_disabled(self):
+ # disabled = no compute node stats
+ self.assertTrue(self.tracker.disabled)
+ self.assertIsNone(self.tracker.compute_node)
+
+ def test_disabled_claim(self):
+ # basic claim:
+ instance = self._fake_instance()
+ claim = self.tracker.instance_claim(self.context, instance)
+ self.assertEqual(0, claim.memory_mb)
+
+ def test_disabled_instance_claim(self):
+ # instance variation:
+ instance = self._fake_instance()
+ claim = self.tracker.instance_claim(self.context, instance)
+ self.assertEqual(0, claim.memory_mb)
+
+ def test_disabled_instance_context_claim(self):
+ # instance context manager variation:
+ instance = self._fake_instance()
+ claim = self.tracker.instance_claim(self.context, instance)
+ with self.tracker.instance_claim(self.context, instance) as claim:
+ self.assertEqual(0, claim.memory_mb)
+
+ def test_disabled_updated_usage(self):
+ instance = self._fake_instance(host='fakehost', memory_mb=5,
+ root_gb=10)
+ self.tracker.update_usage(self.context, instance)
+
+ def test_disabled_resize_claim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_flavor_create()
+ claim = self.tracker.resize_claim(self.context, instance,
+ instance_type)
+ self.assertEqual(0, claim.memory_mb)
+ self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
+ self.assertEqual(instance_type['id'],
+ claim.migration['new_instance_type_id'])
+
+ def test_disabled_resize_context_claim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_flavor_create()
+ with self.tracker.resize_claim(self.context, instance, instance_type) \
+ as claim:
+ self.assertEqual(0, claim.memory_mb)
+
+
+class MissingServiceTestCase(BaseTestCase):
+ def setUp(self):
+ super(MissingServiceTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.tracker = self._tracker()
+
+ def test_missing_service(self):
+ self.tracker.update_available_resource(self.context)
+ self.assertTrue(self.tracker.disabled)
+
+
+class MissingComputeNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MissingComputeNodeTestCase, self).setUp()
+ self.tracker = self._tracker()
+
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
+ self.stubs.Set(db, 'compute_node_create',
+ self._fake_create_compute_node)
+ self.tracker.scheduler_client.update_resource_stats = mock.Mock()
+
+ def _fake_create_compute_node(self, context, values):
+ self.created = True
+ return self._create_compute_node()
+
+ def _fake_service_get_by_compute_host(self, ctx, host):
+ # return a service with no joined compute
+ service = self._create_service()
+ return service
+
+ def test_create_compute_node(self):
+ self.tracker.update_available_resource(self.context)
+ self.assertTrue(self.created)
+
+ def test_enabled(self):
+ self.tracker.update_available_resource(self.context)
+ self.assertFalse(self.tracker.disabled)
+
+
+class BaseTrackerTestCase(BaseTestCase):
+
+ def setUp(self):
+ # setup plumbing for a working resource tracker with required
+ # database models and a compatible compute driver:
+ super(BaseTrackerTestCase, self).setUp()
+
+ self.updated = False
+ self.deleted = False
+ self.update_call_count = 0
+
+ self.tracker = self._tracker()
+ self._migrations = {}
+
+ self.stubs.Set(db, 'service_get_by_compute_host',
+ self._fake_service_get_by_compute_host)
+ self.stubs.Set(db, 'compute_node_update',
+ self._fake_compute_node_update)
+ self.stubs.Set(db, 'compute_node_delete',
+ self._fake_compute_node_delete)
+ self.stubs.Set(db, 'migration_update',
+ self._fake_migration_update)
+ self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
+ self._fake_migration_get_in_progress_by_host_and_node)
+
+ # Note that this must be called before the call to _init_tracker()
+ patcher = pci_fakes.fake_pci_whitelist()
+ self.addCleanup(patcher.stop)
+
+ self._init_tracker()
+ self.limits = self._limits()
+
+ def _fake_service_get_by_compute_host(self, ctx, host):
+ self.compute = self._create_compute_node()
+ self.service = self._create_service(host, compute=self.compute)
+ return self.service
+
+ def _fake_compute_node_update(self, ctx, compute_node_id, values,
+ prune_stats=False):
+ self.update_call_count += 1
+ self.updated = True
+ self.compute.update(values)
+ return self.compute
+
+ def _fake_compute_node_delete(self, ctx, compute_node_id):
+ self.deleted = True
+ self.compute.update({'deleted': 1})
+ return self.compute
+
+ def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
+ node):
+ status = ['confirmed', 'reverted', 'error']
+ migrations = []
+
+ for migration in self._migrations.values():
+ migration = obj_base.obj_to_primitive(migration)
+ if migration['status'] in status:
+ continue
+
+ uuid = migration['instance_uuid']
+ migration['instance'] = self._instances[uuid]
+ migrations.append(migration)
+
+ return migrations
+
+ def _fake_migration_update(self, ctxt, migration_id, values):
+ # cheat and assume there's only 1 migration present
+ migration = self._migrations.values()[0]
+ migration.update(values)
+ return migration
+
+ def _init_tracker(self):
+ self.tracker.update_available_resource(self.context)
+
+ def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ disk_gb=FAKE_VIRT_LOCAL_GB,
+ vcpus=FAKE_VIRT_VCPUS,
+ numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
+ """Create limits dictionary used for oversubscribing resources."""
+
+ return {
+ 'memory_mb': memory_mb,
+ 'disk_gb': disk_gb,
+ 'vcpu': vcpus,
+ 'numa_topology': numa_topology.to_json() if numa_topology else None
+ }
+
+ def assertEqualNUMAHostTopology(self, expected, got):
+ attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
+ if None in (expected, got):
+ if expected != got:
+ raise AssertionError("Topologies don't match. Expected: "
+ "%(expected)s, but got: %(got)s" %
+ {'expected': expected, 'got': got})
+ else:
+ return
+
+ if len(expected) != len(got):
+ raise AssertionError("Topologies don't match due to different "
+ "number of cells. Expected: "
+ "%(expected)s, but got: %(got)s" %
+ {'expected': expected, 'got': got})
+ for exp_cell, got_cell in zip(expected.cells, got.cells):
+ for attr in attrs:
+ if getattr(exp_cell, attr) != getattr(got_cell, attr):
+ raise AssertionError("Topologies don't match. Expected: "
+ "%(expected)s, but got: %(got)s" %
+ {'expected': expected, 'got': got})
+
+ def _assert(self, value, field, tracker=None):
+
+ if tracker is None:
+ tracker = self.tracker
+
+ if field not in tracker.compute_node:
+ raise test.TestingException(
+ "'%(field)s' not in compute node." % {'field': field})
+ x = tracker.compute_node[field]
+
+ if field == 'numa_topology':
+ self.assertEqualNUMAHostTopology(
+ value, hardware.VirtNUMAHostTopology.from_json(x))
+ else:
+ self.assertEqual(value, x)
+
+
+class TrackerTestCase(BaseTrackerTestCase):
+
+ def test_free_ram_resource_value(self):
+ driver = FakeVirtDriver()
+ mem_free = driver.memory_mb - driver.memory_mb_used
+ self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
+
+ def test_free_disk_resource_value(self):
+ driver = FakeVirtDriver()
+ mem_free = driver.local_gb - driver.local_gb_used
+ self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
+
+ def test_update_compute_node(self):
+ self.assertFalse(self.tracker.disabled)
+ self.assertTrue(self.updated)
+
+ def test_init(self):
+ driver = self._driver()
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+ self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self._assert(0, 'running_vms')
+ self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+ self.assertEqual(driver.pci_stats,
+ jsonutils.loads(self.tracker.compute_node['pci_stats']))
+
+
+class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(SchedulerClientTrackerTestCase, self).setUp()
+ self.tracker.scheduler_client.update_resource_stats = mock.Mock()
+
+ def test_create_resource(self):
+ self.tracker._write_ext_resources = mock.Mock()
+ self.tracker.conductor_api.compute_node_create = mock.Mock(
+ return_value=dict(id=1))
+ values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
+ self.tracker._create(self.context, values)
+
+ expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
+ 'id': 1}
+ self.tracker.scheduler_client.update_resource_stats.\
+ assert_called_once_with(self.context,
+ ("fakehost", "fakenode"),
+ expected)
+
+ def test_update_resource(self):
+ self.tracker._write_ext_resources = mock.Mock()
+ values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
+ self.tracker._update(self.context, values)
+
+ expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
+ 'id': 1}
+ self.tracker.scheduler_client.update_resource_stats.\
+ assert_called_once_with(self.context,
+ ("fakehost", "fakenode"),
+ expected)
+
+
+class TrackerPciStatsTestCase(BaseTrackerTestCase):
+
+ def test_update_compute_node(self):
+ self.assertFalse(self.tracker.disabled)
+ self.assertTrue(self.updated)
+
+ def test_init(self):
+ driver = self._driver()
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+ self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self._assert(0, 'running_vms')
+ self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+ self.assertEqual(driver.pci_stats,
+ jsonutils.loads(self.tracker.compute_node['pci_stats']))
+
+ def _driver(self):
+ return FakeVirtDriver(pci_support=True)
+
+
+class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(TrackerExtraResourcesTestCase, self).setUp()
+ self.driver = self._driver()
+
+ def _driver(self):
+ return FakeVirtDriver()
+
+ def test_set_empty_ext_resources(self):
+ resources = self.driver.get_available_resource(self.tracker.nodename)
+ self.assertNotIn('stats', resources)
+ self.tracker._write_ext_resources(resources)
+ self.assertIn('stats', resources)
+
+ def test_set_extra_resources(self):
+ def fake_write_resources(resources):
+ resources['stats']['resA'] = '123'
+ resources['stats']['resB'] = 12
+
+ self.stubs.Set(self.tracker.ext_resources_handler,
+ 'write_resources',
+ fake_write_resources)
+
+ resources = self.driver.get_available_resource(self.tracker.nodename)
+ self.tracker._write_ext_resources(resources)
+
+ expected = {"resA": "123", "resB": 12}
+ self.assertEqual(sorted(expected),
+ sorted(resources['stats']))
+
+
+class InstanceClaimTestCase(BaseTrackerTestCase):
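+ # Helpers that build the guest NUMA topology being claimed and the host
+ # NUMA usage expected once the claim is applied (two cells, each with the
+ # given memory and CPU usage).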
+ def _instance_topology(self, mem):
+ mem = mem * 1024
+ return hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), mem),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), mem)])
+
+ def _claim_topology(self, mem, cpus=1):
+ if self.tracker.driver.numa_topology is None:
+ return None
+ mem = mem * 1024
+ return hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(
+ 0, set([1, 2]), 3072, cpu_usage=cpus,
+ memory_usage=mem),
+ hardware.VirtNUMATopologyCellUsage(
+ 1, set([3, 4]), 3072, cpu_usage=cpus,
+ memory_usage=mem)])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_update_usage_only_for_tracked(self, mock_get):
+ flavor = self._fake_flavor_create()
+ claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
+ claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
+ claim_topology = self._claim_topology(claim_mem / 2)
+
+ instance_topology = self._instance_topology(claim_mem / 2)
+
+ instance = self._fake_instance(
+ flavor=flavor, task_state=None,
+ numa_topology=instance_topology)
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'current_workload')
+ self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+
+ claim = self.tracker.instance_claim(self.context, instance,
+ self.limits)
+ self.assertNotEqual(0, claim.memory_mb)
+ self._assert(claim_mem, 'memory_mb_used')
+ self._assert(claim_gb, 'local_gb_used')
+ self._assert(claim_topology, 'numa_topology')
+
+ # now update should actually take effect
+ instance['task_state'] = task_states.SCHEDULING
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(claim_mem, 'memory_mb_used')
+ self._assert(claim_gb, 'local_gb_used')
+ self._assert(claim_topology, 'numa_topology')
+ self._assert(1, 'current_workload')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim_and_audit(self, mock_get):
+ claim_mem = 3
+ claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
+ claim_disk = 2
+ claim_topology = self._claim_topology(claim_mem_total / 2)
+
+ instance_topology = self._instance_topology(claim_mem_total / 2)
+ instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
+ ephemeral_gb=0, numa_topology=instance_topology)
+
+ self.tracker.instance_claim(self.context, instance, self.limits)
+
+ self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
+ self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+ self.compute["free_ram_mb"])
+ self.assertEqualNUMAHostTopology(
+ claim_topology, hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
+ self.assertEqual(claim_disk, self.compute["local_gb_used"])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+ self.compute["free_disk_gb"])
+
+ # First, pretend that the compute operation finished and claimed the
+ # desired resources from the virt layer
+ driver = self.tracker.driver
+ driver.memory_mb_used = claim_mem
+ driver.local_gb_used = claim_disk
+
+ self.tracker.update_available_resource(self.context)
+
+ # confirm tracker is adding in host_ip
+ self.assertIsNotNone(self.compute.get('host_ip'))
+
+ # confirm that resource usage is derived from instance usages,
+ # not virt layer:
+ self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+ self.compute['free_ram_mb'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology, hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(claim_disk, self.compute['local_gb_used'])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+ self.compute['free_disk_gb'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim_and_abort(self, mock_get):
+ claim_mem = 3
+ claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
+ claim_disk = 2
+ claim_topology = self._claim_topology(claim_mem_total / 2)
+
+ instance_topology = self._instance_topology(claim_mem_total / 2)
+ instance = self._fake_instance(memory_mb=claim_mem,
+ root_gb=claim_disk, ephemeral_gb=0,
+ numa_topology=instance_topology)
+
+ claim = self.tracker.instance_claim(self.context, instance,
+ self.limits)
+ self.assertIsNotNone(claim)
+
+ self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+ self.compute["free_ram_mb"])
+ self.assertEqualNUMAHostTopology(
+ claim_topology, hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(claim_disk, self.compute["local_gb_used"])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+ self.compute["free_disk_gb"])
+
+ claim.abort()
+
+ self.assertEqual(0, self.compute["memory_mb_used"])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
+ self.assertEqualNUMAHostTopology(
+ FAKE_VIRT_NUMA_TOPOLOGY,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(0, self.compute["local_gb_used"])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_instance_claim_with_oversubscription(self, mock_get):
+ memory_mb = FAKE_VIRT_MEMORY_MB * 2
+ root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
+ vcpus = FAKE_VIRT_VCPUS * 2
+ claim_topology = self._claim_topology(memory_mb)
+ instance_topology = self._instance_topology(memory_mb)
+
+ limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
+ 'disk_gb': root_gb * 2,
+ 'vcpu': vcpus,
+ 'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
+
+ instance = self._fake_instance(memory_mb=memory_mb,
+ root_gb=root_gb, ephemeral_gb=ephemeral_gb,
+ numa_topology=instance_topology)
+
+ self.tracker.instance_claim(self.context, instance, limits)
+ self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(root_gb * 2,
+ self.tracker.compute_node['local_gb_used'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_additive_claims(self, mock_get):
+ self.limits['vcpu'] = 2
+ claim_topology = self._claim_topology(2, cpus=2)
+
+ flavor = self._fake_flavor_create(
+ memory_mb=1, root_gb=1, ephemeral_gb=0)
+ instance_topology = self._instance_topology(1)
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance, self.limits):
+ pass
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance, self.limits):
+ pass
+
+ self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(2 * flavor['vcpus'],
+ self.tracker.compute_node['vcpus_used'])
+
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_context_claim_with_exception(self, mock_get):
+ instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
+ try:
+ with self.tracker.instance_claim(self.context, instance):
+ # <insert exciting things that utilize resources>
+ raise test.TestingException()
+ except test.TestingException:
+ pass
+
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(0, self.compute['memory_mb_used'])
+ self.assertEqual(0, self.compute['local_gb_used'])
+ self.assertEqualNUMAHostTopology(
+ FAKE_VIRT_NUMA_TOPOLOGY,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_instance_context_claim(self, mock_get):
+ flavor = self._fake_flavor_create(
+ memory_mb=1, root_gb=2, ephemeral_gb=3)
+ claim_topology = self._claim_topology(1)
+
+ instance_topology = self._instance_topology(1)
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance):
+ # <insert exciting things that utilize resources>
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.compute['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.compute['local_gb_used'])
+
+ # after exiting the claim context the build is marked as finished; usage
+ # totals should stay the same:
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.compute['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.compute['local_gb_used'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_update_load_stats_for_instance(self, mock_get):
+ instance = self._fake_instance(task_state=task_states.SCHEDULING)
+ with self.tracker.instance_claim(self.context, instance):
+ pass
+
+ self.assertEqual(1, self.tracker.compute_node['current_workload'])
+
+ instance['vm_state'] = vm_states.ACTIVE
+ instance['task_state'] = None
+ instance['host'] = 'fakehost'
+
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_cpu_stats(self, mock_get):
+ limits = {'disk_gb': 100, 'memory_mb': 100}
+ self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
+
+ vcpus = 1
+ instance = self._fake_instance(vcpus=vcpus)
+
+ # should not do anything until a claim is made:
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
+
+ with self.tracker.instance_claim(self.context, instance, limits):
+ pass
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ # instance state can change without modifying vcpus in use:
+ instance['task_state'] = task_states.SCHEDULING
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ add_vcpus = 10
+ vcpus += add_vcpus
+ instance = self._fake_instance(vcpus=add_vcpus)
+ with self.tracker.instance_claim(self.context, instance, limits):
+ pass
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ instance['vm_state'] = vm_states.DELETED
+ self.tracker.update_usage(self.context, instance)
+ vcpus -= add_vcpus
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ def test_skip_deleted_instances(self):
+ # ensure that the audit process skips instances that have vm_state
+ # DELETED, but the DB record is not yet deleted.
+ self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+
+
+class ResizeClaimTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+
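+ # Stub out Migration.create() so resize claims record migrations in
+ # self._migrations instead of touching the database.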
+ def _fake_migration_create(mig_self, ctxt):
+ self._migrations[mig_self.instance_uuid] = mig_self
+ mig_self.obj_reset_changes()
+
+ self.stubs.Set(objects.Migration, 'create',
+ _fake_migration_create)
+
+ self.instance = self._fake_instance()
+ self.instance_type = self._fake_flavor_create()
+
+ def _fake_migration_create(self, context, values=None):
+ instance_uuid = str(uuid.uuid1())
+ mig_dict = test_migration.fake_db_migration()
+ mig_dict.update({
+ 'id': 1,
+ 'source_compute': 'host1',
+ 'source_node': 'fakenode',
+ 'dest_compute': 'host2',
+ 'dest_node': 'fakenode',
+ 'dest_host': '127.0.0.1',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'instance_uuid': instance_uuid,
+ 'status': 'pre-migrating',
+ 'updated_at': timeutils.utcnow()
+ })
+ if values:
+ mig_dict.update(values)
+
+ migration = objects.Migration()
+ migration.update(mig_dict)
+ # This hits the stub in setUp()
+ migration.create('fake')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_abort(self, mock_get):
+ try:
+ with self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits):
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_additive_claims(self, mock_get):
+
+ limits = self._limits(
+ 2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 2 * FAKE_VIRT_LOCAL_GB,
+ 2 * FAKE_VIRT_VCPUS)
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, limits)
+ instance2 = self._fake_instance()
+ self.tracker.resize_claim(self.context, instance2, self.instance_type,
+ limits)
+
+ self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim_and_audit(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_same_host(self, mock_get):
+ self.limits['vcpu'] = 3
+
+ src_dict = {
+ 'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
+ dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
+ src_type = self._fake_flavor_create(
+ id=10, name="srcflavor", **src_dict)
+ dest_type = self._fake_flavor_create(
+ id=11, name="destflavor", **dest_dict)
+
+ # make an instance of src_type:
+ instance = self._fake_instance(flavor=src_type)
+ instance['system_metadata'] = self._fake_instance_system_metadata(
+ dest_type)
+ self.tracker.instance_claim(self.context, instance, self.limits)
+
+ # resize to dest_type:
+ claim = self.tracker.resize_claim(self.context, instance,
+ dest_type, self.limits)
+
+ self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ + 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
+ self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ + dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
+ 'local_gb_used')
+ self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
+
+ self.tracker.update_available_resource(self.context)
+ claim.abort()
+
+ # only the original instance should remain, not the migration:
+ self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ 'memory_mb_used')
+ self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
+ 'local_gb_used')
+ self._assert(src_dict['vcpus'], 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_revert(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, {}, self.limits)
+ self.tracker.drop_resize_claim(self.context, self.instance)
+
+ self.assertEqual(0, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_revert_reserve_source(self, mock_get):
+ # if a revert has started at the API and audit runs on
+ # the source compute before the instance flips back to source,
+ # resources should still be held at the source based on the
+ # migration:
+ dest = "desthost"
+ dest_tracker = self._tracker(host=dest)
+ dest_tracker.update_available_resource(self.context)
+
+ self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
+ root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
+ vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
+
+ values = {'source_compute': self.host, 'dest_compute': dest,
+ 'old_instance_type_id': 1, 'new_instance_type_id': 1,
+ 'status': 'post-migrating',
+ 'instance_uuid': self.instance['uuid']}
+ self._fake_migration_create(self.context, values)
+
+ # attach an instance to the destination host tracker:
+ dest_tracker.instance_claim(self.context, self.instance)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 'memory_mb_used', tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # audit and recheck to confirm migration doesn't get double counted
+ # on dest:
+ dest_tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 'memory_mb_used', tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # apply the migration to the source host tracker:
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ # flag the instance and migration as reverting and re-audit:
+ self.instance['vm_state'] = vm_states.RESIZED
+ self.instance['task_state'] = task_states.RESIZE_REVERTING
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_resize_filter(self):
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
+ self.assertFalse(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.RESIZED,
+ task_state=task_states.SUSPENDING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
+ for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
+ for task_state in states:
+ instance = self._fake_instance(vm_state=vm_state,
+ task_state=task_state)
+ result = self.tracker._instance_in_resize_state(instance)
+ self.assertTrue(result)
+
+ def test_dupe_filter(self):
+ instance = self._fake_instance(host=self.host)
+
+ values = {'source_compute': self.host, 'dest_compute': self.host,
+ 'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
+ self._fake_flavor_create(id=2)
+ self._fake_migration_create(self.context, values)
+ self._fake_migration_create(self.context, values)
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_set_instance_host_and_node(self, mock_get):
+ instance = self._fake_instance()
+ self.assertIsNone(instance['host'])
+ self.assertIsNone(instance['launched_on'])
+ self.assertIsNone(instance['node'])
+
+ claim = self.tracker.instance_claim(self.context, instance)
+ self.assertNotEqual(0, claim.memory_mb)
+
+ self.assertEqual('fakehost', instance['host'])
+ self.assertEqual('fakehost', instance['launched_on'])
+ self.assertEqual('fakenode', instance['node'])
+
+
+class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
+ """Make sure we handle the case where the following are true:
+
+ #) Compute node C gets upgraded to code that looks for instance types in
+ system metadata. AND
+ #) C already has instances in the process of migrating that do not have
+ stashed instance types.
+
+ bug 1164110
+ """
+ def setUp(self):
+ super(NoInstanceTypesInSysMetadata, self).setUp()
+ self.instance = self._fake_instance(stash=False)
+
+ def test_get_instance_type_stash_false(self):
+ with (mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=self.instance_type)):
+ flavor = self.tracker._get_instance_type(self.context,
+ self.instance, "new_")
+ self.assertEqual(self.instance_type, flavor)
+
+
+class OrphanTestCase(BaseTrackerTestCase):
+ def _driver(self):
+ class OrphanVirtDriver(FakeVirtDriver):
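+ # Report usage for two instance UUIDs the tracker has no record of;
+ # the tracker should count their memory and flag them as orphans.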
+ def get_per_instance_usage(self):
+ return {
+ '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'uuid': '1-2-3-4-5'},
+ '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'uuid': '2-3-4-5-6'},
+ }
+
+ return OrphanVirtDriver()
+
+ def test_usage(self):
+ self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+
+ def test_find(self):
+ # create one legitimate instance and verify that the two orphans remain
+ self._fake_instance()
+ orphans = self.tracker._find_orphaned_instances()
+
+ self.assertEqual(2, len(orphans))
+
+
+class ComputeMonitorTestCase(BaseTestCase):
+ def setUp(self):
+ super(ComputeMonitorTestCase, self).setUp()
+ fake_monitors = [
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
+ self.flags(compute_available_monitors=fake_monitors)
+ self.tracker = self._tracker()
+ self.node_name = 'nodename'
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.info = {}
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def test_get_host_metrics_none(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
+ self.tracker.monitors = []
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ self.assertEqual(len(metrics), 0)
+
+ def test_get_host_metrics_one_failed(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
+ class1 = test_monitors.FakeMonitorClass1(self.tracker)
+ class4 = test_monitors.FakeMonitorClass4(self.tracker)
+ self.tracker.monitors = [class1, class4]
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ self.assertTrue(len(metrics) > 0)
+
+ def test_get_host_metrics(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
+ class1 = test_monitors.FakeMonitorClass1(self.tracker)
+ class2 = test_monitors.FakeMonitorClass2(self.tracker)
+ self.tracker.monitors = [class1, class2]
+
+ mock_notifier = mock.Mock()
+
+ with mock.patch.object(rpc, 'get_notifier',
+ return_value=mock_notifier) as mock_get:
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ mock_get.assert_called_once_with(service='compute',
+ host=self.node_name)
+
+ expected_metrics = [{
+ 'timestamp': 1232,
+ 'name': 'key1',
+ 'value': 2600,
+ 'source': 'libvirt'
+ }, {
+ 'name': 'key2',
+ 'source': 'libvirt',
+ 'timestamp': 123,
+ 'value': 1600
+ }]
+
+ payload = {
+ 'metrics': expected_metrics,
+ 'host': self.tracker.host,
+ 'host_ip': CONF.my_ip,
+ 'nodename': self.node_name
+ }
+
+ mock_notifier.info.assert_called_once_with(
+ self.context, 'compute.metrics.update', payload)
+
+ self.assertEqual(metrics, expected_metrics)
+
+
+class TrackerPeriodicTestCase(BaseTrackerTestCase):
+
+ def test_periodic_status_update(self):
+ # verify update called on instantiation
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update not called if no change to resources
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update is called when resources change
+ driver = self.tracker.driver
+ driver.memory_mb += 1
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(2, self.update_call_count)
+
+ def test_update_available_resource_calls_locked_inner(self):
+ @mock.patch.object(self.tracker, 'driver')
+ @mock.patch.object(self.tracker,
+ '_update_available_resource')
+ @mock.patch.object(self.tracker, '_verify_resources')
+ @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
+ def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
+ resources = {'there is someone in my head': 'but it\'s not me'}
+ mock_driver.get_available_resource.return_value = resources
+ self.tracker.update_available_resource(self.context)
+ mock_uar.assert_called_once_with(self.context, resources)
+
+ _test()
+
+
+class StatsDictTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a dictionary.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsJsonTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a json string.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ # and add rt stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsInvalidJsonTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ an invalid type for stats.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats='this is not json')
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for string that does not parse as json
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
+
+
+class StatsInvalidTypeTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ an invalid type for stats.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=10)
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for incorrect stats value type
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
diff --git a/nova/tests/unit/compute/test_resources.py b/nova/tests/unit/compute/test_resources.py
new file mode 100644
index 0000000000..cdd1585e34
--- /dev/null
+++ b/nova/tests/unit/compute/test_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the compute extra resources framework."""
+
+
+from oslo.config import cfg
+from stevedore import extension
+from stevedore import named
+
+from nova.compute import resources
+from nova.compute.resources import base
+from nova.compute.resources import vcpu
+from nova import context
+from nova.i18n import _
+from nova.objects import flavor as flavor_obj
+from nova import test
+from nova.tests.unit.fake_instance import fake_instance_obj
+
+CONF = cfg.CONF
+
+
+class FakeResourceHandler(resources.ResourceHandler):
+ def __init__(self, extensions):
+ self._mgr = \
+ named.NamedExtensionManager.make_test_instance(extensions)
+
+
+class FakeResource(base.Resource):
+
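+ # Minimal resource plugin used by these tests: the requested amount comes
+ # from the flavor extra_spec 'resource:<name>' and the limit from the
+ # limits dict keyed the same way.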
+ def __init__(self):
+ self.total_res = 0
+ self.used_res = 0
+
+ def _get_requested(self, usage):
+ if 'extra_specs' not in usage:
+ return
+ if self.resource_name not in usage['extra_specs']:
+ return
+ req = usage['extra_specs'][self.resource_name]
+ return int(req)
+
+ def _get_limit(self, limits):
+ if self.resource_name not in limits:
+ return
+ limit = limits[self.resource_name]
+ return int(limit)
+
+ def reset(self, resources, driver):
+ self.total_res = 0
+ self.used_res = 0
+
+ def test(self, usage, limits):
+ requested = self._get_requested(usage)
+ if not requested:
+ return
+
+ limit = self._get_limit(limits)
+ if not limit:
+ return
+
+ free = limit - self.used_res
+ if requested <= free:
+ return
+ else:
+ return (_('Free %(free)d < requested %(requested)d ') %
+ {'free': free, 'requested': requested})
+
+ def add_instance(self, usage):
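+ # Both _create and _update should serialize the stats dict to JSON and
+ # report the result to the scheduler keyed by (host, nodename).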
+ requested = self._get_requested(usage)
+ if requested:
+ self.used_res += requested
+
+ def remove_instance(self, usage):
+ requested = self._get_requested(usage)
+ if requested:
+ self.used_res -= requested
+
+ def write(self, resources):
+ pass
+
+ def report_free(self):
+ return "Free %s" % (self.total_res - self.used_res)
+
+
+class ResourceA(FakeResource):
+
+ def reset(self, resources, driver):
+ # ResourceA uses a configuration option
+ self.total_res = int(CONF.resA)
+ self.used_res = 0
+ self.resource_name = 'resource:resA'
+
+ def write(self, resources):
+ resources['resA'] = self.total_res
+ resources['used_resA'] = self.used_res
+
+
+class ResourceB(FakeResource):
+
+ def reset(self, resources, driver):
+ # ResourceB uses resource details passed in parameter resources
+ self.total_res = resources['resB']
+ self.used_res = 0
+ self.resource_name = 'resource:resB'
+
+ def write(self, resources):
+ resources['resB'] = self.total_res
+ resources['used_resB'] = self.used_res
+
+
+def fake_flavor_obj(**updates):
+ flavor = flavor_obj.Flavor()
+ flavor.id = 1
+ flavor.name = 'fakeflavor'
+ flavor.memory_mb = 8000
+ flavor.vcpus = 3
+ flavor.root_gb = 11
+ flavor.ephemeral_gb = 4
+ flavor.swap = 0
+ flavor.rxtx_factor = 1.0
+ flavor.vcpu_weight = 1
+ if updates:
+ flavor.update(updates)
+ return flavor
+
+
+class BaseTestCase(test.TestCase):
+
+ def _initialize_used_res_counter(self):
+ # Initialize the value for the used resource
+ for ext in self.r_handler._mgr.extensions:
+ ext.obj.used_res = 0
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+
+ # initialize flavors and stub get_by_id to
+ # get flavors from here
+ self._flavors = {}
+ self.ctxt = context.get_admin_context()
+
+ # Create a flavor without extra_specs defined
+ _flavor_id = 1
+ _flavor = fake_flavor_obj(id=_flavor_id)
+ self._flavors[_flavor_id] = _flavor
+
+ # Create a flavor with extra_specs defined
+ _flavor_id = 2
+ requested_resA = 5
+ requested_resB = 7
+ requested_resC = 7
+ _extra_specs = {'resource:resA': requested_resA,
+ 'resource:resB': requested_resB,
+ 'resource:resC': requested_resC}
+ _flavor = fake_flavor_obj(id=_flavor_id,
+ extra_specs=_extra_specs)
+ self._flavors[_flavor_id] = _flavor
+
+ # create fake resource extensions and resource handler
+ _extensions = [
+ extension.Extension('resA', None, ResourceA, ResourceA()),
+ extension.Extension('resB', None, ResourceB, ResourceB()),
+ ]
+ self.r_handler = FakeResourceHandler(_extensions)
+
+ # Resource details can be passed to each plugin or specified as
+ # configuration options
+ driver_resources = {'resB': 5}
+ CONF.resA = '10'
+
+ # initialise the resources
+ self.r_handler.reset_resources(driver_resources, None)
+
+ def test_update_from_instance_with_extra_specs(self):
+ # Flavor with extra_specs
+ _flavor_id = 2
+ sign = 1
+ self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
+
+ expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
+ expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
+ self.assertEqual(int(expected_resA),
+ self.r_handler._mgr['resA'].obj.used_res)
+ self.assertEqual(int(expected_resB),
+ self.r_handler._mgr['resB'].obj.used_res)
+
+ def test_update_from_instance_without_extra_specs(self):
+ # Flavor id without extra spec
+ _flavor_id = 1
+ self._initialize_used_res_counter()
+ self.r_handler.resource_list = []
+ sign = 1
+ self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
+ self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
+ self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)
+
+ def test_write_resources(self):
+ self._initialize_used_res_counter()
+ extra_resources = {}
+ expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
+ self.r_handler.write_resources(extra_resources)
+ self.assertEqual(expected, extra_resources)
+
+ def test_test_resources_without_extra_specs(self):
+ limits = {}
+ # Flavor id without extra_specs
+ flavor = self._flavors[1]
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_test_resources_with_limits_for_different_resource(self):
+ limits = {'resource:resC': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_passing_test_resources(self):
+ limits = {'resource:resA': 10, 'resource:resB': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ self._initialize_used_res_counter()
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_failing_test_resources_for_single_resource(self):
+ limits = {'resource:resA': 4, 'resource:resB': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ self._initialize_used_res_counter()
+ result = self.r_handler.test_resources(flavor, limits)
+ expected = ['Free 4 < requested 5 ', None]
+ self.assertEqual(sorted(expected),
+ sorted(result))
+
+ def test_empty_resource_handler(self):
+ """An empty resource handler has no resource extensions,
+ should have no effect, and should raise no exceptions.
+ """
+ empty_r_handler = FakeResourceHandler([])
+
+ resources = {}
+ empty_r_handler.reset_resources(resources, None)
+
+ flavor = self._flavors[1]
+ sign = 1
+ empty_r_handler.update_from_instance(flavor, sign)
+
+ limits = {}
+ test_result = empty_r_handler.test_resources(flavor, limits)
+ self.assertEqual([], test_result)
+
+ sign = -1
+ empty_r_handler.update_from_instance(flavor, sign)
+
+ extra_resources = {}
+ expected_extra_resources = {}
+ empty_r_handler.write_resources(extra_resources)
+ self.assertEqual(expected_extra_resources, extra_resources)
+
+ empty_r_handler.report_free_resources()
+
+ def test_vcpu_resource_load(self):
+ # load the vcpu example
+ names = ['vcpu']
+ real_r_handler = resources.ResourceHandler(names)
+ ext_names = real_r_handler._mgr.names()
+ self.assertEqual(names, ext_names)
+
+ # check that the loaded extension is the one we expect and that an
+ # instance of the plugin object has been created
+ ext = real_r_handler._mgr['vcpu']
+ self.assertIsInstance(ext.obj, vcpu.VCPU)
+
+
+class TestVCPU(test.TestCase):
+
+ def setUp(self):
+ super(TestVCPU, self).setUp()
+ self._vcpu = vcpu.VCPU()
+ self._vcpu._total = 10
+ self._vcpu._used = 0
+ self._flavor = fake_flavor_obj(vcpus=5)
+ self._big_flavor = fake_flavor_obj(vcpus=20)
+ self._instance = fake_instance_obj(None)
+
+ def test_reset(self):
+ # set vcpu values to something different to test reset
+ self._vcpu._total = 10
+ self._vcpu._used = 5
+
+ driver_resources = {'vcpus': 20}
+ self._vcpu.reset(driver_resources, None)
+ self.assertEqual(20, self._vcpu._total)
+ self.assertEqual(0, self._vcpu._used)
+
+ def test_add_and_remove_instance(self):
+ self._vcpu.add_instance(self._flavor)
+ self.assertEqual(10, self._vcpu._total)
+ self.assertEqual(5, self._vcpu._used)
+
+ self._vcpu.remove_instance(self._flavor)
+ self.assertEqual(10, self._vcpu._total)
+ self.assertEqual(0, self._vcpu._used)
+
+ def test_test_pass_limited(self):
+ result = self._vcpu.test(self._flavor, {'vcpu': 10})
+ self.assertIsNone(result, 'vcpu test failed when it should pass')
+
+ def test_test_pass_unlimited(self):
+ result = self._vcpu.test(self._big_flavor, {})
+ self.assertIsNone(result, 'vcpu test failed when it should pass')
+
+ def test_test_fail(self):
+ result = self._vcpu.test(self._flavor, {'vcpu': 2})
+ expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs')
+ self.assertEqual(expected, result)
+
+ def test_write(self):
+ resources = {'stats': {}}
+ self._vcpu.write(resources)
+ expected = {
+ 'vcpus': 10,
+ 'vcpus_used': 0,
+ 'stats': {
+ 'num_vcpus': 10,
+ 'num_vcpus_used': 0
+ }
+ }
+ self.assertEqual(sorted(expected),
+ sorted(resources))
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
new file mode 100644
index 0000000000..bf8e41215e
--- /dev/null
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -0,0 +1,486 @@
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.compute.rpcapi
+"""
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova import context
+from nova.objects import block_device as objects_block_dev
+from nova.objects import network_request as objects_network_request
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit.fake_instance import fake_instance_obj
+
+CONF = cfg.CONF
+
+
+class ComputeRpcAPITestCase(test.TestCase):
+
+ def setUp(self):
+ super(ComputeRpcAPITestCase, self).setUp()
+ self.context = context.get_admin_context()
+ instance_attr = {'host': 'fake_host',
+ 'instance_type_id': 1}
+ self.fake_instance_obj = fake_instance_obj(self.context,
+ **instance_attr)
+ self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
+ self.fake_volume_bdm = jsonutils.to_primitive(
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'instance_uuid': self.fake_instance['uuid'],
+ 'volume_id': 'fake-volume-id'}))
+
+ def test_serialized_instance_has_name(self):
+ self.assertIn('name', self.fake_instance)
+
+ def _test_compute_api(self, method, rpc_method, **kwargs):
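+ # Generic helper: call the named RPC API method with the client's
+ # cast/call and prepare() mocked out, then verify prepare() was pinned
+ # to the expected version and server and that the method was invoked
+ # with the expected keyword arguments.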
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)
+
+ orig_prepare = rpcapi.client.prepare
+ expected_version = kwargs.pop('version', rpcapi.client.target.version)
+
+ expected_kwargs = kwargs.copy()
+ if ('requested_networks' in expected_kwargs and
+ expected_version == '3.23'):
+ expected_kwargs['requested_networks'] = []
+ for requested_network in kwargs['requested_networks']:
+ expected_kwargs['requested_networks'].append(
+ (requested_network.network_id,
+ str(requested_network.address),
+ requested_network.port_id))
+ if 'host_param' in expected_kwargs:
+ expected_kwargs['host'] = expected_kwargs.pop('host_param')
+ else:
+ expected_kwargs.pop('host', None)
+ expected_kwargs.pop('destination', None)
+
+ cast_and_call = ['confirm_resize', 'stop_instance']
+ if rpc_method == 'call' and method in cast_and_call:
+ if method == 'confirm_resize':
+ kwargs['cast'] = False
+ else:
+ kwargs['do_cast'] = False
+ if 'host' in kwargs:
+ host = kwargs['host']
+ elif 'destination' in kwargs:
+ host = kwargs['destination']
+ elif 'instances' in kwargs:
+ host = kwargs['instances'][0]['host']
+ else:
+ host = kwargs['instance']['host']
+
+ with contextlib.nested(
+ mock.patch.object(rpcapi.client, rpc_method),
+ mock.patch.object(rpcapi.client, 'prepare'),
+ mock.patch.object(rpcapi.client, 'can_send_version'),
+ ) as (
+ rpc_mock, prepare_mock, csv_mock
+ ):
+ prepare_mock.return_value = rpcapi.client
+ if 'return_bdm_object' in kwargs:
+ del kwargs['return_bdm_object']
+ rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
+ elif rpc_method == 'call':
+ rpc_mock.return_value = 'foo'
+ else:
+ rpc_mock.return_value = None
+ csv_mock.side_effect = (
+ lambda v: orig_prepare(version=v).can_send_version())
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, rpc_mock.return_value)
+
+ prepare_mock.assert_called_once_with(version=expected_version,
+ server=host)
+ rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
+
+ def test_add_aggregate_host(self):
+ self._test_compute_api('add_aggregate_host', 'cast',
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={})
+
+ def test_add_fixed_ip_to_instance(self):
+ self._test_compute_api('add_fixed_ip_to_instance', 'cast',
+ instance=self.fake_instance_obj, network_id='id',
+ version='3.12')
+
+ def test_attach_interface(self):
+ self._test_compute_api('attach_interface', 'call',
+ instance=self.fake_instance_obj, network_id='id',
+ port_id='id2', version='3.17', requested_ip='192.168.1.50')
+
+ def test_attach_volume(self):
+ self._test_compute_api('attach_volume', 'cast',
+ instance=self.fake_instance_obj, volume_id='id',
+ mountpoint='mp', bdm=self.fake_volume_bdm, version='3.16')
+
+ def test_change_instance_metadata(self):
+ self._test_compute_api('change_instance_metadata', 'cast',
+ instance=self.fake_instance_obj, diff={}, version='3.7')
+
+ def test_check_can_live_migrate_destination(self):
+ self._test_compute_api('check_can_live_migrate_destination', 'call',
+ instance=self.fake_instance_obj,
+ destination='dest', block_migration=True,
+ disk_over_commit=True, version='3.32')
+
+ def test_check_can_live_migrate_source(self):
+ self._test_compute_api('check_can_live_migrate_source', 'call',
+ instance=self.fake_instance_obj,
+ dest_check_data={"test": "data"}, version='3.32')
+
+ def test_check_instance_shared_storage(self):
+ self._test_compute_api('check_instance_shared_storage', 'call',
+ instance=self.fake_instance_obj, data='foo',
+ version='3.29')
+
+ def test_confirm_resize_cast(self):
+ self._test_compute_api('confirm_resize', 'cast',
+ instance=self.fake_instance_obj, migration={'id': 'foo'},
+ host='host', reservations=list('fake_res'))
+
+ def test_confirm_resize_call(self):
+ self._test_compute_api('confirm_resize', 'call',
+ instance=self.fake_instance_obj, migration={'id': 'foo'},
+ host='host', reservations=list('fake_res'))
+
+ def test_detach_interface(self):
+ self._test_compute_api('detach_interface', 'cast',
+ version='3.17', instance=self.fake_instance_obj,
+ port_id='fake_id')
+
+ def test_detach_volume(self):
+ self._test_compute_api('detach_volume', 'cast',
+ instance=self.fake_instance_obj, volume_id='id',
+ version='3.25')
+
+ def test_finish_resize(self):
+ self._test_compute_api('finish_resize', 'cast',
+ instance=self.fake_instance_obj, migration={'id': 'foo'},
+ image='image', disk_info='disk_info', host='host',
+ reservations=list('fake_res'))
+
+ def test_finish_revert_resize(self):
+ self._test_compute_api('finish_revert_resize', 'cast',
+ instance=self.fake_instance_obj, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'))
+
+ def test_get_console_output(self):
+ self._test_compute_api('get_console_output', 'call',
+ instance=self.fake_instance_obj, tail_length='tl',
+ version='3.28')
+
+ def test_get_console_pool_info(self):
+ self._test_compute_api('get_console_pool_info', 'call',
+ console_type='type', host='host')
+
+ def test_get_console_topic(self):
+ self._test_compute_api('get_console_topic', 'call', host='host')
+
+ def test_get_diagnostics(self):
+ self._test_compute_api('get_diagnostics', 'call',
+ instance=self.fake_instance_obj, version='3.18')
+
+ def test_get_instance_diagnostics(self):
+ self._test_compute_api('get_instance_diagnostics', 'call',
+ instance=self.fake_instance, version='3.31')
+
+ def test_get_vnc_console(self):
+ self._test_compute_api('get_vnc_console', 'call',
+ instance=self.fake_instance_obj, console_type='type',
+ version='3.2')
+
+ def test_get_spice_console(self):
+ self._test_compute_api('get_spice_console', 'call',
+ instance=self.fake_instance_obj, console_type='type',
+ version='3.1')
+
+ def test_get_rdp_console(self):
+ self._test_compute_api('get_rdp_console', 'call',
+ instance=self.fake_instance_obj, console_type='type',
+ version='3.10')
+
+ def test_get_serial_console(self):
+ self._test_compute_api('get_serial_console', 'call',
+ instance=self.fake_instance, console_type='serial',
+ version='3.34')
+
+ def test_validate_console_port(self):
+ self._test_compute_api('validate_console_port', 'call',
+ instance=self.fake_instance_obj, port="5900",
+ console_type="novnc", version='3.3')
+
+ def test_host_maintenance_mode(self):
+ self._test_compute_api('host_maintenance_mode', 'call',
+ host_param='param', mode='mode', host='host')
+
+ def test_host_power_action(self):
+ self._test_compute_api('host_power_action', 'call', action='action',
+ host='host')
+
+ def test_inject_network_info(self):
+ self._test_compute_api('inject_network_info', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_live_migration(self):
+ self._test_compute_api('live_migration', 'cast',
+ instance=self.fake_instance_obj, dest='dest',
+ block_migration='blockity_block', host='tsoh',
+ migrate_data={}, version='3.26')
+
+ def test_post_live_migration_at_destination(self):
+ self._test_compute_api('post_live_migration_at_destination', 'cast',
+ instance=self.fake_instance_obj,
+ block_migration='block_migration', host='host', version='3.14')
+
+ def test_pause_instance(self):
+ self._test_compute_api('pause_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_soft_delete_instance(self):
+ self._test_compute_api('soft_delete_instance', 'cast',
+ instance=self.fake_instance_obj,
+ reservations=['uuid1', 'uuid2'])
+
+ def test_swap_volume(self):
+ self._test_compute_api('swap_volume', 'cast',
+ instance=self.fake_instance_obj, old_volume_id='oldid',
+ new_volume_id='newid')
+
+ def test_restore_instance(self):
+ self._test_compute_api('restore_instance', 'cast',
+ instance=self.fake_instance_obj, version='3.20')
+
+ def test_pre_live_migration(self):
+ self._test_compute_api('pre_live_migration', 'call',
+ instance=self.fake_instance_obj,
+ block_migration='block_migration', disk='disk', host='host',
+ migrate_data=None, version='3.19')
+
+ def test_prep_resize(self):
+ self._test_compute_api('prep_resize', 'cast',
+ instance=self.fake_instance_obj, instance_type='fake_type',
+ image='fake_image', host='host',
+ reservations=list('fake_res'),
+ request_spec='fake_spec',
+ filter_properties={'fakeprop': 'fakeval'},
+ node='node')
+
+ def test_reboot_instance(self):
+ self.maxDiff = None
+ self._test_compute_api('reboot_instance', 'cast',
+ instance=self.fake_instance_obj,
+ block_device_info={},
+ reboot_type='type')
+
+ def test_rebuild_instance(self):
+ self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
+ injected_files='None', image_ref='None', orig_image_ref='None',
+ bdms=[], instance=self.fake_instance_obj, host='new_host',
+ orig_sys_metadata=None, recreate=True, on_shared_storage=True,
+ preserve_ephemeral=True, version='3.21')
+
+ def test_reserve_block_device_name(self):
+ self._test_compute_api('reserve_block_device_name', 'call',
+ instance=self.fake_instance_obj, device='device',
+ volume_id='id', disk_bus='ide', device_type='cdrom',
+ version='3.35', return_bdm_object=True)
+
+ def test_refresh_provider_fw_rules(self):
+ self._test_compute_api('refresh_provider_fw_rules', 'cast',
+ host='host')
+
+ def test_refresh_security_group_rules(self):
+ self._test_compute_api('refresh_security_group_rules', 'cast',
+ rpcapi_class=compute_rpcapi.SecurityGroupAPI,
+ security_group_id='id', host='host')
+
+ def test_refresh_security_group_members(self):
+ self._test_compute_api('refresh_security_group_members', 'cast',
+ rpcapi_class=compute_rpcapi.SecurityGroupAPI,
+ security_group_id='id', host='host')
+
+ def test_remove_aggregate_host(self):
+ self._test_compute_api('remove_aggregate_host', 'cast',
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={})
+
+ def test_remove_fixed_ip_from_instance(self):
+ self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
+ instance=self.fake_instance_obj, address='addr',
+ version='3.13')
+
+ def test_remove_volume_connection(self):
+ self._test_compute_api('remove_volume_connection', 'call',
+ instance=self.fake_instance, volume_id='id', host='host',
+ version='3.30')
+
+ def test_rescue_instance(self):
+ self.flags(compute='3.9', group='upgrade_levels')
+ self._test_compute_api('rescue_instance', 'cast',
+ instance=self.fake_instance_obj, rescue_password='pw',
+ version='3.9')
+
+ def test_rescue_instance_with_rescue_image_ref_passed(self):
+ self._test_compute_api('rescue_instance', 'cast',
+ instance=self.fake_instance_obj, rescue_password='pw',
+ rescue_image_ref='fake_image_ref', version='3.24')
+
+ def test_reset_network(self):
+ self._test_compute_api('reset_network', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_resize_instance(self):
+ self._test_compute_api('resize_instance', 'cast',
+ instance=self.fake_instance_obj, migration={'id': 'fake_id'},
+ image='image', instance_type={'id': 1},
+ reservations=list('fake_res'))
+
+ def test_resume_instance(self):
+ self._test_compute_api('resume_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_revert_resize(self):
+ self._test_compute_api('revert_resize', 'cast',
+ instance=self.fake_instance_obj, migration={'id': 'fake_id'},
+ host='host', reservations=list('fake_res'))
+
+ def test_rollback_live_migration_at_destination(self):
+ self._test_compute_api('rollback_live_migration_at_destination',
+ 'cast', instance=self.fake_instance_obj, host='host',
+ destroy_disks=True, migrate_data=None, version='3.32')
+
+ def test_run_instance(self):
+ self._test_compute_api('run_instance', 'cast',
+ instance=self.fake_instance_obj, host='fake_host',
+ request_spec='fake_spec', filter_properties={},
+ requested_networks='networks', injected_files='files',
+ admin_password='pw', is_first_time=True, node='node',
+ legacy_bdm_in_spec=False, version='3.27')
+
+ def test_set_admin_password(self):
+ self._test_compute_api('set_admin_password', 'call',
+ instance=self.fake_instance_obj, new_pass='pw',
+ version='3.8')
+
+ def test_set_host_enabled(self):
+ self._test_compute_api('set_host_enabled', 'call',
+ enabled='enabled', host='host')
+
+ def test_get_host_uptime(self):
+ self._test_compute_api('get_host_uptime', 'call', host='host')
+
+ def test_backup_instance(self):
+ self._test_compute_api('backup_instance', 'cast',
+ instance=self.fake_instance_obj, image_id='id',
+ backup_type='type', rotation='rotation')
+
+ def test_snapshot_instance(self):
+ self._test_compute_api('snapshot_instance', 'cast',
+ instance=self.fake_instance_obj, image_id='id')
+
+ def test_start_instance(self):
+ self._test_compute_api('start_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_stop_instance_cast(self):
+ self._test_compute_api('stop_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_stop_instance_call(self):
+ self._test_compute_api('stop_instance', 'call',
+ instance=self.fake_instance_obj)
+
+ def test_suspend_instance(self):
+ self._test_compute_api('suspend_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_terminate_instance(self):
+ self._test_compute_api('terminate_instance', 'cast',
+ instance=self.fake_instance_obj, bdms=[],
+ reservations=['uuid1', 'uuid2'], version='3.22')
+
+ def test_unpause_instance(self):
+ self._test_compute_api('unpause_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_unrescue_instance(self):
+ self._test_compute_api('unrescue_instance', 'cast',
+ instance=self.fake_instance_obj, version='3.11')
+
+ def test_shelve_instance(self):
+ self._test_compute_api('shelve_instance', 'cast',
+ instance=self.fake_instance_obj, image_id='image_id')
+
+ def test_shelve_offload_instance(self):
+ self._test_compute_api('shelve_offload_instance', 'cast',
+ instance=self.fake_instance_obj)
+
+ def test_unshelve_instance(self):
+ self._test_compute_api('unshelve_instance', 'cast',
+ instance=self.fake_instance_obj, host='host', image='image',
+ filter_properties={'fakeprop': 'fakeval'}, node='node',
+ version='3.15')
+
+ def test_volume_snapshot_create(self):
+ self._test_compute_api('volume_snapshot_create', 'cast',
+ instance=self.fake_instance, volume_id='fake_id',
+ create_info={}, version='3.6')
+
+ def test_volume_snapshot_delete(self):
+ self._test_compute_api('volume_snapshot_delete', 'cast',
+ instance=self.fake_instance_obj, volume_id='fake_id',
+ snapshot_id='fake_id2', delete_info={}, version='3.6')
+
+ def test_external_instance_event(self):
+ self._test_compute_api('external_instance_event', 'cast',
+ instances=[self.fake_instance_obj],
+ events=['event'],
+ version='3.23')
+
+ def test_build_and_run_instance(self):
+ self._test_compute_api('build_and_run_instance', 'cast',
+ instance=self.fake_instance_obj, host='host', image='image',
+ request_spec={'request': 'spec'}, filter_properties=[],
+ admin_password='passwd', injected_files=None,
+ requested_networks=['network1'], security_groups=None,
+ block_device_mapping=None, node='node', limits=[],
+ version='3.33')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_build_and_run_instance_icehouse_compat(self, is_neutron):
+ self.flags(compute='icehouse', group='upgrade_levels')
+ self._test_compute_api('build_and_run_instance', 'cast',
+ instance=self.fake_instance_obj, host='host', image='image',
+ request_spec={'request': 'spec'}, filter_properties=[],
+ admin_password='passwd', injected_files=None,
+            requested_networks=objects_network_request.NetworkRequestList(
+ objects=[objects_network_request.NetworkRequest(
+ network_id="fake_network_id", address="10.0.0.1",
+ port_id="fake_port_id")]),
+ security_groups=None,
+ block_device_mapping=None, node='node', limits=[],
+ version='3.23')
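
The `_test_compute_api` helper that drives all of the tests above is defined earlier in this file and is not part of this hunk. The pattern it implements can be restated as a standalone sketch; FakeClient, FakeComputeRPCAPI and pause_instance below are illustrative names rather than Nova code, and the prepare()/cast() shape is an assumption following the oslo.messaging style these tests imply.

import mock


class FakeClient(object):
    """Stand-in transport: prepare() pins a version, cast() sends one-way."""
    def prepare(self, version=None):
        self.pinned_version = version
        return self

    def cast(self, ctxt, method, **kwargs):
        pass


class FakeComputeRPCAPI(object):
    """Toy facade mirroring how a compute RPC API method forwards kwargs."""
    def __init__(self, client):
        self.client = client

    def pause_instance(self, ctxt, instance):
        cctxt = self.client.prepare(version='3.0')
        cctxt.cast(ctxt, 'pause_instance', instance=instance)


client = FakeClient()
api = FakeComputeRPCAPI(client)
with mock.patch.object(client, 'cast') as cast_mock:
    api.pause_instance('ctxt', instance={'uuid': 'fake'})
    # The assertion style matches what _test_compute_api ultimately checks:
    # the method name, keyword arguments and version pin arrive unchanged.
    cast_mock.assert_called_once_with('ctxt', 'pause_instance',
                                      instance={'uuid': 'fake'})
assert client.pinned_version == '3.0'
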
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
new file mode 100644
index 0000000000..3e792ae893
--- /dev/null
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -0,0 +1,414 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import iso8601
+import mock
+import mox
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.compute import claims
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import objects
+from nova.objects import base as obj_base
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit.image import fake as fake_image
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+
+
+def _fake_resources():
+ resources = {
+ 'memory_mb': 2048,
+ 'memory_mb_used': 0,
+ 'free_ram_mb': 2048,
+ 'local_gb': 20,
+ 'local_gb_used': 0,
+ 'free_disk_gb': 20,
+ 'vcpus': 2,
+ 'vcpus_used': 0
+ }
+ return resources
+
+
+class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
+ def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
+ CONF.set_override('shelved_offload_time', shelved_offload_time)
+ instance = self._create_fake_instance_obj()
+ db_instance = obj_base.obj_to_primitive(instance)
+ image_id = 'fake_image_id'
+ host = 'fake-mini'
+ cur_time = timeutils.utcnow()
+ timeutils.set_time_override(cur_time)
+ instance.task_state = task_states.SHELVING
+ instance.save()
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = image_id
+ sys_meta['shelved_host'] = host
+ db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve.start')
+ if clean_shutdown:
+ self.compute.driver.power_off(instance,
+ CONF.shutdown_timeout,
+ self.compute.SHUTDOWN_RETRY_INTERVAL)
+ else:
+ self.compute.driver.power_off(instance, 0, 0)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
+ mox.IgnoreArg())
+
+ update_values = {'power_state': 123,
+ 'vm_state': vm_states.SHELVED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_IMAGE_UPLOADING],
+ 'system_metadata': sys_meta}
+ if CONF.shelved_offload_time == 0:
+ update_values['task_state'] = task_states.SHELVING_OFFLOADING
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ update_values, update_cells=False,
+ columns_to_join=['metadata', 'system_metadata', 'info_cache',
+ 'security_groups'],
+ ).AndReturn((db_instance,
+ db_instance))
+ self.compute._notify_about_instance_usage(self.context,
+ instance, 'shelve.end')
+ if CONF.shelved_offload_time == 0:
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.start')
+ self.compute.driver.power_off(instance)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context,
+ instance['uuid'],
+ {'power_state': 123, 'host': None, 'node': None,
+ 'vm_state': vm_states.SHELVED_OFFLOADED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_OFFLOADING]},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata',
+ 'info_cache',
+ 'security_groups'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.end')
+ self.mox.ReplayAll()
+
+ self.compute.shelve_instance(self.context, instance,
+ image_id=image_id, clean_shutdown=clean_shutdown)
+
+ def test_shelve(self):
+ self._shelve_instance(-1)
+
+ def test_shelve_forced_shutdown(self):
+ self._shelve_instance(-1, clean_shutdown=False)
+
+ def test_shelve_offload(self):
+ self._shelve_instance(0)
+
+ def test_shelve_volume_backed(self):
+ instance = self._create_fake_instance_obj()
+ instance.task_state = task_states.SHELVING
+ instance.save()
+ db_instance = obj_base.obj_to_primitive(instance)
+ host = 'fake-mini'
+ cur_time = timeutils.utcnow()
+ timeutils.set_time_override(cur_time)
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = None
+ sys_meta['shelved_host'] = host
+ db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.start')
+ self.compute.driver.power_off(instance)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123, 'host': None, 'node': None,
+ 'vm_state': vm_states.SHELVED_OFFLOADED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_OFFLOADING]},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata',
+ 'info_cache', 'security_groups'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.end')
+ self.mox.ReplayAll()
+
+ self.compute.shelve_offload_instance(self.context, instance)
+
+ def test_unshelve(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['metadata', 'system_metadata'])
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ image = {'id': 'fake_id'}
+ host = 'fake-mini'
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ cur_time = timeutils.utcnow()
+ cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(cur_time)
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = image['id']
+ sys_meta['shelved_host'] = host
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.rt, 'instance_claim')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+
+ self.deleted_image_id = None
+
+ def fake_delete(self2, ctxt, image_id):
+ self.deleted_image_id = image_id
+
+ def fake_claim(context, instance, limits):
+ instance.host = self.compute.host
+ return claims.Claim(context, db_instance,
+ self.rt, _fake_resources())
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.start')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'task_state': task_states.SPAWNING},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._prep_block_device(self.context, instance,
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+ db_instance['key_data'] = None
+ db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
+ self.compute.driver.spawn(self.context, instance, image,
+ injected_files=[], admin_password=None,
+ network_info=[],
+ block_device_info='fake_bdm')
+ self.compute._get_power_state(self.context, instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'image_ref': instance['image_ref'],
+ 'key_data': None,
+ 'host': self.compute.host, # rt.instance_claim set this
+ 'auto_disk_config': False,
+ 'expected_task_state': task_states.SPAWNING,
+ 'launched_at': cur_time_tz},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance,
+ dict(db_instance,
+ host=self.compute.host,
+ metadata={})))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.end')
+ self.mox.ReplayAll()
+
+ with mock.patch.object(self.rt, 'instance_claim',
+ side_effect=fake_claim):
+ self.compute.unshelve_instance(self.context, instance, image=image,
+ filter_properties=filter_properties, node=node)
+ self.assertEqual(image['id'], self.deleted_image_id)
+ self.assertEqual(instance.host, self.compute.host)
+
+ def test_unshelve_volume_backed(self):
+ db_instance = self._create_fake_instance()
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ cur_time = timeutils.utcnow()
+ cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(cur_time)
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['metadata', 'system_metadata'])
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.rt, 'instance_claim')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.start')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'task_state': task_states.SPAWNING},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance, db_instance))
+ self.compute._prep_block_device(self.context, instance,
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+ db_instance['key_data'] = None
+ db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
+ self.rt.instance_claim(self.context, instance, limits).AndReturn(
+ claims.Claim(self.context, db_instance, self.rt,
+ _fake_resources()))
+ self.compute.driver.spawn(self.context, instance, None,
+ injected_files=[], admin_password=None,
+ network_info=[],
+ block_device_info='fake_bdm')
+ self.compute._get_power_state(self.context, instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'key_data': None,
+ 'auto_disk_config': False,
+ 'expected_task_state': task_states.SPAWNING,
+ 'launched_at': cur_time_tz},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.end')
+ self.mox.ReplayAll()
+
+ self.compute.unshelve_instance(self.context, instance, image=None,
+ filter_properties=filter_properties, node=node)
+
+ def test_shelved_poll_none_exist(self):
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(timeutils, 'is_older_than')
+ self.mox.ReplayAll()
+ self.compute._poll_shelved_instances(self.context)
+
+ def test_shelved_poll_not_timedout(self):
+ instance = self._create_fake_instance_obj()
+ sys_meta = instance.system_metadata
+ shelved_time = timeutils.utcnow()
+ timeutils.set_time_override(shelved_time)
+ timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
+ sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
+
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.ReplayAll()
+ self.compute._poll_shelved_instances(self.context)
+
+ def test_shelved_poll_timedout(self):
+ instance = self._create_fake_instance_obj()
+ sys_meta = instance.system_metadata
+ shelved_time = timeutils.utcnow()
+ timeutils.set_time_override(shelved_time)
+ timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
+ sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
+ (old, instance) = db.instance_update_and_get_original(self.context,
+ instance['uuid'], {'vm_state': vm_states.SHELVED,
+ 'system_metadata': sys_meta})
+
+ def fake_destroy(inst, nw_info, bdm):
+            # NOTE(alaski): There are too many differences between an
+            # instance as returned by instance_update_and_get_original and
+            # instance_get_all_by_filters, so just compare the uuid.
+ self.assertEqual(instance['uuid'], inst['uuid'])
+
+ self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
+ self.compute._poll_shelved_instances(self.context)
+
+
+class ShelveComputeAPITestCase(test_compute.BaseTestCase):
+ def test_shelve(self):
+ # Ensure instance can be shelved.
+ fake_instance = self._create_fake_instance_obj(
+ {'display_name': 'vm01'})
+ instance = fake_instance
+
+ self.assertIsNone(instance['task_state'])
+
+ def fake_init(self2):
+            # The original _FakeImageService.__init__() creates some fake
+            # images. To verify only the snapshot name produced by this
+            # test, replace it with a stub that starts from an empty
+            # image store.
+ self2.images = {}
+
+ def fake_create(self2, ctxt, metadata, data=None):
+ self.assertEqual(metadata['name'], 'vm01-shelved')
+ metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
+ return metadata
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
+ self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
+
+ self.compute_api.shelve(self.context, instance)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.SHELVING)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_unshelve(self):
+ # Ensure instance can be unshelved.
+ instance = self._create_fake_instance_obj()
+
+ self.assertIsNone(instance['task_state'])
+
+ self.compute_api.shelve(self.context, instance)
+
+ instance.refresh()
+ instance.task_state = None
+ instance.vm_state = vm_states.SHELVED
+ instance.save()
+
+ self.compute_api.unshelve(self.context, instance)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.UNSHELVING)
+
+ db.instance_destroy(self.context, instance['uuid'])
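
For readers more used to mock than mox: the stub/record/replay cycle that ShelveComputeManagerTestCase leans on looks like this in isolation. Greeter is an illustrative class, not part of the patch; in the tests above the base test case creates self.mox and its fixture verifies and unstubs at cleanup, which is why no explicit VerifyAll() appears there.

import mox


class Greeter(object):
    def greet(self, name):
        return 'hello %s' % name


m = mox.Mox()
greeter = Greeter()
m.StubOutWithMock(greeter, 'greet')
greeter.greet('world').AndReturn('hi world')   # record the expected call
m.ReplayAll()                                  # switch from record to replay
assert greeter.greet('world') == 'hi world'    # exercise the code under test
m.VerifyAll()                                  # every expectation was met
m.UnsetStubs()
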
diff --git a/nova/tests/unit/compute/test_stats.py b/nova/tests/unit/compute/test_stats.py
new file mode 100644
index 0000000000..c90314b0fc
--- /dev/null
+++ b/nova/tests/unit/compute/test_stats.py
@@ -0,0 +1,222 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for compute node stats."""
+
+from nova.compute import stats
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import test
+
+
+class StatsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(StatsTestCase, self).setUp()
+ self.stats = stats.Stats()
+
+ def _create_instance(self, values=None):
+ instance = {
+ "os_type": "Linux",
+ "project_id": "1234",
+ "task_state": None,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 1,
+ "uuid": "12-34-56-78-90",
+ }
+ if values:
+ instance.update(values)
+ return instance
+
+ def test_os_type_count(self):
+ os_type = "Linux"
+ self.assertEqual(0, self.stats.num_os_type(os_type))
+ self.stats._increment("num_os_type_" + os_type)
+ self.stats._increment("num_os_type_" + os_type)
+ self.stats._increment("num_os_type_Vax")
+ self.assertEqual(2, self.stats.num_os_type(os_type))
+ self.stats["num_os_type_" + os_type] -= 1
+ self.assertEqual(1, self.stats.num_os_type(os_type))
+
+ def test_update_project_count(self):
+ proj_id = "1234"
+
+ def _get():
+ return self.stats.num_instances_for_project(proj_id)
+
+ self.assertEqual(0, _get())
+ self.stats._increment("num_proj_" + proj_id)
+ self.assertEqual(1, _get())
+ self.stats["num_proj_" + proj_id] -= 1
+ self.assertEqual(0, _get())
+
+ def test_instance_count(self):
+ self.assertEqual(0, self.stats.num_instances)
+ for i in range(5):
+ self.stats._increment("num_instances")
+ self.stats["num_instances"] -= 1
+ self.assertEqual(4, self.stats.num_instances)
+
+ def test_add_stats_for_instance(self):
+ instance = {
+ "os_type": "Linux",
+ "project_id": "1234",
+ "task_state": None,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 3,
+ "uuid": "12-34-56-78-90",
+ }
+ self.stats.update_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "FreeBSD",
+ "project_id": "1234",
+ "task_state": task_states.SCHEDULING,
+ "vm_state": None,
+ "vcpus": 1,
+ "uuid": "23-45-67-89-01",
+ }
+ self.stats.update_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "Linux",
+ "project_id": "2345",
+ "task_state": task_states.SCHEDULING,
+ "vm_state": vm_states.BUILDING,
+ "vcpus": 2,
+ "uuid": "34-56-78-90-12",
+ }
+
+ self.stats.update_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "Linux",
+ "project_id": "2345",
+ "task_state": task_states.RESCUING,
+ "vm_state": vm_states.ACTIVE,
+ "vcpus": 2,
+ "uuid": "34-56-78-90-13",
+ }
+
+ self.stats.update_stats_for_instance(instance)
+
+ instance = {
+ "os_type": "Linux",
+ "project_id": "2345",
+ "task_state": task_states.UNSHELVING,
+ "vm_state": vm_states.ACTIVE,
+ "vcpus": 2,
+ "uuid": "34-56-78-90-14",
+ }
+
+ self.stats.update_stats_for_instance(instance)
+
+ self.assertEqual(4, self.stats.num_os_type("Linux"))
+ self.assertEqual(1, self.stats.num_os_type("FreeBSD"))
+
+ self.assertEqual(2, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(3, self.stats.num_instances_for_project("2345"))
+
+ self.assertEqual(1, self.stats["num_task_None"])
+ self.assertEqual(2, self.stats["num_task_" + task_states.SCHEDULING])
+ self.assertEqual(1, self.stats["num_task_" + task_states.UNSHELVING])
+ self.assertEqual(1, self.stats["num_task_" + task_states.RESCUING])
+
+ self.assertEqual(1, self.stats["num_vm_None"])
+ self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING])
+
+ def test_calculate_workload(self):
+ self.stats._increment("num_task_None")
+ self.stats._increment("num_task_" + task_states.SCHEDULING)
+ self.stats._increment("num_task_" + task_states.SCHEDULING)
+ self.assertEqual(2, self.stats.calculate_workload())
+
+ def test_update_stats_for_instance_no_change(self):
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+
+ self.stats.update_stats_for_instance(instance) # no change
+ self.assertEqual(1, self.stats.num_instances)
+ self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats["num_os_type_Linux"])
+ self.assertEqual(1, self.stats["num_task_None"])
+ self.assertEqual(1, self.stats["num_vm_" + vm_states.BUILDING])
+
+ def test_update_stats_for_instance_vm_change(self):
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+
+ instance["vm_state"] = vm_states.PAUSED
+ self.stats.update_stats_for_instance(instance)
+ self.assertEqual(1, self.stats.num_instances)
+        self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats["num_os_type_Linux"])
+ self.assertEqual(0, self.stats["num_vm_%s" % vm_states.BUILDING])
+ self.assertEqual(1, self.stats["num_vm_%s" % vm_states.PAUSED])
+
+ def test_update_stats_for_instance_task_change(self):
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+
+ instance["task_state"] = task_states.REBUILDING
+ self.stats.update_stats_for_instance(instance)
+ self.assertEqual(1, self.stats.num_instances)
+ self.assertEqual(1, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(1, self.stats["num_os_type_Linux"])
+ self.assertEqual(0, self.stats["num_task_None"])
+ self.assertEqual(1, self.stats["num_task_%s" % task_states.REBUILDING])
+
+ def test_update_stats_for_instance_deleted(self):
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+ self.assertEqual(1, self.stats["num_proj_1234"])
+
+ instance["vm_state"] = vm_states.DELETED
+ self.stats.update_stats_for_instance(instance)
+
+ self.assertEqual(0, self.stats.num_instances)
+ self.assertEqual(0, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(0, self.stats.num_os_type("Linux"))
+ self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING])
+
+ def test_io_workload(self):
+ vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED]
+ tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING,
+ task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
+ task_states.IMAGE_BACKUP, task_states.RESCUING,
+ task_states.UNSHELVING, task_states.SHELVING]
+
+ for state in vms:
+ self.stats._increment("num_vm_" + state)
+ for state in tasks:
+ self.stats._increment("num_task_" + state)
+
+ self.assertEqual(8, self.stats.io_workload)
+
+ def test_io_workload_saved_to_stats(self):
+ values = {'task_state': task_states.RESIZE_MIGRATING}
+ instance = self._create_instance(values)
+ self.stats.update_stats_for_instance(instance)
+ self.assertEqual(2, self.stats["io_workload"])
+
+ def test_clear(self):
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+
+ self.assertNotEqual(0, len(self.stats))
+ self.assertEqual(1, len(self.stats.states))
+ self.stats.clear()
+
+ self.assertEqual(0, len(self.stats))
+ self.assertEqual(0, len(self.stats.states))
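
These assertions read most naturally once you see the stats object as a plain counter dictionary keyed by strings such as num_os_type_<os> and num_proj_<project_id>. A minimal sketch of that scheme, assuming the real nova.compute.stats.Stats behaves like a dict subclass with helper accessors (MiniStats is illustrative only):

class MiniStats(dict):
    def _increment(self, key):
        # Counters default to zero the first time a key is bumped.
        self[key] = self.get(key, 0) + 1

    def num_os_type(self, os_type):
        return self.get('num_os_type_' + os_type, 0)

    def num_instances_for_project(self, project_id):
        return self.get('num_proj_%s' % project_id, 0)


s = MiniStats()
s._increment('num_os_type_Linux')
s._increment('num_os_type_Linux')
s['num_os_type_Linux'] -= 1
assert s.num_os_type('Linux') == 1

Because the project key is built with string formatting, an integer and a string project id land on the same counter, so either spelling would satisfy the assertions above.
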
diff --git a/nova/tests/unit/compute/test_tracker.py b/nova/tests/unit/compute/test_tracker.py
new file mode 100644
index 0000000000..0a49b93b0c
--- /dev/null
+++ b/nova/tests/unit/compute/test_tracker.py
@@ -0,0 +1,614 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import copy
+
+import mock
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import resource_tracker
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import objects
+from nova import test
+
+_VIRT_DRIVER_AVAIL_RESOURCES = {
+ 'vcpus': 4,
+ 'memory_mb': 512,
+ 'local_gb': 6,
+ 'vcpus_used': 0,
+ 'memory_mb_used': 0,
+ 'local_gb_used': 0,
+ 'hypervisor_type': 'fake',
+ 'hypervisor_version': 0,
+ 'hypervisor_hostname': 'fakehost',
+ 'cpu_info': '',
+ 'numa_topology': None,
+}
+
+_INSTANCE_TYPE_FIXTURES = {
+ 1: {
+ 'id': 1,
+ 'flavorid': 'fakeid-1',
+ 'name': 'fake1.small',
+ 'memory_mb': 128,
+ 'vcpus': 1,
+ 'root_gb': 1,
+ 'ephemeral_gb': 0,
+ 'swap': 0,
+ 'rxtx_factor': 0,
+ 'vcpu_weight': 1,
+ 'extra_specs': {},
+ },
+ 2: {
+ 'id': 2,
+ 'flavorid': 'fakeid-2',
+ 'name': 'fake1.medium',
+ 'memory_mb': 256,
+ 'vcpus': 2,
+ 'root_gb': 5,
+ 'ephemeral_gb': 0,
+ 'swap': 0,
+ 'rxtx_factor': 0,
+ 'vcpu_weight': 1,
+ 'extra_specs': {},
+ },
+}
+
+
+# A collection of system_metadata attributes that would exist in instances
+# that have the instance type ID matching the dictionary key.
+_INSTANCE_TYPE_SYS_META = {
+ 1: flavors.save_flavor_info({}, _INSTANCE_TYPE_FIXTURES[1]),
+ 2: flavors.save_flavor_info({}, _INSTANCE_TYPE_FIXTURES[2]),
+}
+
+
+_MIGRATION_SYS_META = flavors.save_flavor_info(
+ {}, _INSTANCE_TYPE_FIXTURES[1], 'old_')
+_MIGRATION_SYS_META = flavors.save_flavor_info(
+ _MIGRATION_SYS_META, _INSTANCE_TYPE_FIXTURES[2], 'new_')
+
+
+_INSTANCE_FIXTURES = [
+ objects.Instance(
+ id=1,
+ uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
+ memory_mb=128,
+ vcpus=1,
+ root_gb=1,
+ ephemeral_gb=0,
+ numa_topology=None,
+ instance_type_id=1,
+ vm_state=vm_states.ACTIVE,
+ power_state=power_state.RUNNING,
+ task_state=None,
+ os_type='fake-os', # Used by the stats collector.
+ project_id='fake-project', # Used by the stats collector.
+ ),
+ objects.Instance(
+ id=2,
+ uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
+ memory_mb=256,
+ vcpus=2,
+ root_gb=5,
+ ephemeral_gb=0,
+ numa_topology=None,
+ instance_type_id=2,
+ vm_state=vm_states.DELETED,
+ power_state=power_state.SHUTDOWN,
+ task_state=None,
+ os_type='fake-os',
+ project_id='fake-project-2',
+ ),
+]
+
+_MIGRATION_FIXTURES = {
+ # A migration that has only this compute node as the source host
+ 'source-only': objects.Migration(
+ id=1,
+ instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
+ source_compute='fake-host',
+ dest_compute='other-host',
+ source_node='fake-node',
+ dest_node='other-node',
+ old_instance_type_id=1,
+ new_instance_type_id=2,
+ status='migrating'
+ ),
+ # A migration that has only this compute node as the dest host
+ 'dest-only': objects.Migration(
+ id=2,
+ instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
+ source_compute='other-host',
+ dest_compute='fake-host',
+ source_node='other-node',
+ dest_node='fake-node',
+ old_instance_type_id=1,
+ new_instance_type_id=2,
+ status='migrating'
+ ),
+ # A migration that has this compute node as both the source and dest host
+ 'source-and-dest': objects.Migration(
+ id=3,
+ instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
+ source_compute='fake-host',
+ dest_compute='fake-host',
+ source_node='fake-node',
+ dest_node='fake-node',
+ old_instance_type_id=1,
+ new_instance_type_id=2,
+ status='migrating'
+ ),
+}
+
+_MIGRATION_INSTANCE_FIXTURES = {
+ # source-only
+ 'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
+ id=101,
+ uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
+ memory_mb=128,
+ vcpus=1,
+ root_gb=1,
+ ephemeral_gb=0,
+ numa_topology=None,
+ instance_type_id=1,
+ vm_state=vm_states.ACTIVE,
+ power_state=power_state.RUNNING,
+ task_state=task_states.RESIZE_MIGRATING,
+ system_metadata=_MIGRATION_SYS_META,
+ os_type='fake-os',
+ project_id='fake-project',
+ ),
+ # dest-only
+ 'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
+ id=102,
+ uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
+ memory_mb=256,
+ vcpus=2,
+ root_gb=5,
+ ephemeral_gb=0,
+ numa_topology=None,
+ instance_type_id=2,
+ vm_state=vm_states.ACTIVE,
+ power_state=power_state.RUNNING,
+ task_state=task_states.RESIZE_MIGRATING,
+ system_metadata=_MIGRATION_SYS_META,
+ os_type='fake-os',
+ project_id='fake-project',
+ ),
+ # source-and-dest
+ 'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
+ id=3,
+ uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
+ memory_mb=256,
+ vcpus=2,
+ root_gb=5,
+ ephemeral_gb=0,
+ numa_topology=None,
+ instance_type_id=2,
+ vm_state=vm_states.ACTIVE,
+ power_state=power_state.RUNNING,
+ task_state=task_states.RESIZE_MIGRATING,
+ system_metadata=_MIGRATION_SYS_META,
+ os_type='fake-os',
+ project_id='fake-project',
+ ),
+}
+
+
+def overhead_zero(instance):
+ # Emulate that the driver does not adjust the memory
+ # of the instance...
+ return {
+ 'memory_mb': 0
+ }
+
+
+class BaseTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self.rt = None
+ self.flags(my_ip='fake-ip')
+
+ def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
+ estimate_overhead=overhead_zero):
+ """Sets up the resource tracker instance with mock fixtures.
+
+ :param virt_resources: Optional override of the resource representation
+ returned by the virt driver's
+ `get_available_resource()` method.
+ :param estimate_overhead: Optional override of a function that should
+ return overhead of memory given an instance
+ object. Defaults to returning zero overhead.
+ """
+ self.cond_api_mock = mock.MagicMock()
+ self.sched_client_mock = mock.MagicMock()
+ self.notifier_mock = mock.MagicMock()
+ vd = mock.MagicMock()
+ # Make sure we don't change any global fixtures during tests
+ virt_resources = copy.deepcopy(virt_resources)
+ vd.get_available_resource.return_value = virt_resources
+ vd.estimate_instance_overhead.side_effect = estimate_overhead
+ self.driver_mock = vd
+
+ with contextlib.nested(
+ mock.patch('nova.conductor.API',
+ return_value=self.cond_api_mock),
+ mock.patch('nova.scheduler.client.SchedulerClient',
+ return_value=self.sched_client_mock),
+ mock.patch('nova.rpc.get_notifier',
+ return_value=self.notifier_mock)):
+ self.rt = resource_tracker.ResourceTracker('fake-host',
+ vd,
+ 'fake-node')
+
+
+class TestUpdateAvailableResources(BaseTestCase):
+
+ def _update_available_resources(self):
+        # RT._sync_compute_node() is tested separately; the
+        # update_available_resource() function is already complex enough,
+        # so here we focus only on the resources parameter that
+        # update_available_resource() eventually passes to
+        # _sync_compute_node().
+ with mock.patch.object(self.rt, '_sync_compute_node') as sync_mock:
+ self.rt.update_available_resource(mock.sentinel.ctx)
+ return sync_mock
+
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_no_instances_no_migrations_no_reserved(self, get_mock):
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ get_mock.return_value = []
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_mock.return_value = []
+
+ sync_mock = self._update_available_resources()
+
+ vd = self.driver_mock
+ vd.get_available_resource.assert_called_once_with('fake-node')
+ get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
+ 'fake-node',
+ expected_attrs=[
+ 'system_metadata',
+ 'numa_topology'])
+ migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
+ 'fake-node')
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 6,
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 512,
+ 'memory_mb_used': 0,
+ 'pci_stats': '[]',
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 0,
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ 'running_vms': 0
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_no_instances_no_migrations_reserved_disk_and_ram(
+ self, get_mock):
+ self.flags(reserved_host_disk_mb=1024,
+ reserved_host_memory_mb=512)
+ self._setup_rt()
+
+ get_mock.return_value = []
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_mock.return_value = []
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 5, # 6GB avail - 1 GB reserved
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 0, # 512MB avail - 512MB reserved
+ 'memory_mb_used': 512, # 0MB used + 512MB reserved
+ 'pci_stats': '[]',
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 1, # 0GB used + 1 GB reserved
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ 'running_vms': 0
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_some_instances_no_migrations(self, get_mock):
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ get_mock.return_value = _INSTANCE_FIXTURES
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_mock.return_value = []
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 5, # 6 - 1 used
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 384, # 512 - 128 used
+ 'memory_mb_used': 128,
+ 'pci_stats': '[]',
+            # NOTE(jaypipes): Due to the design of the ERT, which is now used
+            #                 to track VCPUs, the actual VCPU usage isn't
+            #                 "written" to the resources dictionary that is
+            #                 passed to _sync_compute_node() like all the
+            #                 other resources are. Instead,
+            #                 _sync_compute_node() calls the ERT's
+            #                 write_resources() method, which then queries
+            #                 each resource handler plugin for the changes in
+            #                 its resource usage, and the plugin writes those
+            #                 changes to the supplied "values" dict. For this
+            #                 reason, all resources except VCPUs are accurate
+            #                 here. :(
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 1,
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ 'running_vms': 1 # One active instance
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_orphaned_instances_no_migrations(self, get_mock):
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ get_mock.return_value = []
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_mock.return_value = []
+
+ # Orphaned instances are those that the virt driver has on
+ # record as consuming resources on the compute node, but the
+ # Nova database has no record of the instance being active
+ # on the host. For some reason, the resource tracker only
+        # considers an orphaned instance's memory usage in its calculations
+ # of free resources...
+ orphaned_usages = {
+ '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
+            # The return format of get_per_instance_usage repeats the
+            # UUID both as the key and inside the value. Also note that
+            # the libvirt driver always returns an empty dict for this
+            # method, so it is unclear whether this code path is ever
+            # exercised in practice.
+ 'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
+ 'memory_mb': 64
+ }
+ }
+ vd = self.driver_mock
+ vd.get_per_instance_usage.return_value = orphaned_usages
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 6,
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 448, # 512 - 64 orphaned usage
+ 'memory_mb_used': 64,
+ 'pci_stats': '[]',
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 0,
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ # Yep, for some reason, orphaned instances are not counted
+ # as running VMs...
+ 'running_vms': 0
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_no_instances_source_migration(self, get_mock, get_inst_mock):
+ # We test the behavior of update_available_resource() when
+ # there is an active migration that involves this compute node
+        # as the source host, not the destination host, and the resource
+ # tracker does not have any instances assigned to it. This is
+ # the case when a migration from this compute host to another
+ # has been completed, but the user has not confirmed the resize
+ # yet, so the resource tracker must continue to keep the resources
+ # for the original instance type available on the source compute
+ # node in case of a revert of the resize.
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ get_mock.return_value = []
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_obj = _MIGRATION_FIXTURES['source-only']
+ migr_mock.return_value = [migr_obj]
+        # Migration.instance property is accessed in the migration
+        # processing code, and this property calls
+        # objects.Instance.get_by_uuid, so we have the mocked
+        # get_by_uuid return the migrating instance.
+ inst_uuid = migr_obj.instance_uuid
+ get_inst_mock.return_value = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 5,
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 384, # 512 total - 128 for possible revert of orig
+ 'memory_mb_used': 128, # 128 possible revert amount
+ 'pci_stats': '[]',
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 1,
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ 'running_vms': 0
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_no_instances_dest_migration(self, get_mock, get_inst_mock):
+ # We test the behavior of update_available_resource() when
+ # there is an active migration that involves this compute node
+        # as the destination host, not the source host, and the resource
+ # tracker does not yet have any instances assigned to it. This is
+ # the case when a migration to this compute host from another host
+ # is in progress, but the user has not confirmed the resize
+        # yet, so the resource tracker must reserve the resources for
+        # the possibly-to-be-confirmed instance's new instance type on
+        # this node in case the resize is confirmed.
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ get_mock.return_value = []
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_obj = _MIGRATION_FIXTURES['dest-only']
+ migr_mock.return_value = [migr_obj]
+ inst_uuid = migr_obj.instance_uuid
+ get_inst_mock.return_value = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ 'free_disk_gb': 1,
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ 'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
+ 'memory_mb_used': 256, # 256 possible confirmed amount
+ 'pci_stats': '[]',
+ 'vcpus_used': 0, # See NOTE(jaypipes) above about why this is 0
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 5,
+ 'memory_mb': 512,
+ 'current_workload': 0,
+ 'vcpus': 4,
+ 'running_vms': 0
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_some_instances_source_and_dest_migration(
+ self, get_mock, get_inst_mock):
+ # We test the behavior of update_available_resource() when
+ # there is an active migration that involves this compute node
+ # as the destination host AND the source host, and the resource
+ # tracker has a few instances assigned to it, including the
+ # instance that is resizing to this same compute node. The tracking
+ # of resource amounts takes into account both the old and new
+ # resize instance types as taking up space on the node.
+ self.flags(reserved_host_disk_mb=0,
+ reserved_host_memory_mb=0)
+ self._setup_rt()
+
+ capi = self.cond_api_mock
+ migr_mock = capi.migration_get_in_progress_by_host_and_node
+ migr_obj = _MIGRATION_FIXTURES['source-and-dest']
+ migr_mock.return_value = [migr_obj]
+ inst_uuid = migr_obj.instance_uuid
+ # The resizing instance has already had its instance type
+ # changed to the *new* instance type (the bigger one, instance type 2)
+ resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]
+ all_instances = _INSTANCE_FIXTURES + [resizing_instance]
+ get_mock.return_value = all_instances
+ get_inst_mock.return_value = resizing_instance
+
+ sync_mock = self._update_available_resources()
+
+ expected_resources = {
+ 'host_ip': 'fake-ip',
+ 'numa_topology': None,
+ 'metrics': '[]',
+ 'cpu_info': '',
+ 'hypervisor_hostname': 'fakehost',
+ # 6 total - 1G existing - 5G new flav - 1G old flav
+ 'free_disk_gb': -1,
+ 'hypervisor_version': 0,
+ 'local_gb': 6,
+ # 512 total - 128 existing - 256 new flav - 128 old flav
+ 'free_ram_mb': 0,
+ 'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav
+ 'pci_stats': '[]',
+ # See NOTE(jaypipes) above for reason why this isn't accurate until
+ # _sync_compute_node() is called.
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'fake',
+ 'local_gb_used': 7, # 1G existing, 5G new flav + 1 old flav
+ 'memory_mb': 512,
+ 'current_workload': 1, # One migrating instance...
+ 'vcpus': 4,
+ 'running_vms': 2
+ }
+ sync_mock.assert_called_once_with(mock.sentinel.ctx,
+ expected_resources)
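
The _setup_rt() helper at the top of this file accepts an estimate_overhead callable and wires it to the fake driver's estimate_instance_overhead(). A test that wanted non-zero overhead could pass something like the following; overhead_64mb is illustrative and not part of the patch.

def overhead_64mb(instance):
    # Pretend the hypervisor charges a flat 64 MB of memory per instance,
    # matching the {'memory_mb': N} shape that overhead_zero() returns.
    return {'memory_mb': 64}

# Inside a TestUpdateAvailableResources-style test:
#     self._setup_rt(estimate_overhead=overhead_64mb)
# after which the expected 'memory_mb_used' figures would be expected to
# grow by 64 MB for each instance the tracker accounts for.
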
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
new file mode 100644
index 0000000000..5e58ba05d1
--- /dev/null
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -0,0 +1,188 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import mox
+
+from nova.compute import manager as compute_manager
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt import fake
+from nova.virt import virtapi
+
+
+class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage):
+
+ cover_api = virtapi.VirtAPI
+
+ def setUp(self):
+ super(VirtAPIBaseTest, self).setUp()
+ self.context = context.RequestContext('fake-user', 'fake-project')
+ self.set_up_virtapi()
+
+ def set_up_virtapi(self):
+ self.virtapi = virtapi.VirtAPI()
+
+ def assertExpected(self, method, *args, **kwargs):
+ self.assertRaises(NotImplementedError,
+ getattr(self.virtapi, method), self.context,
+ *args, **kwargs)
+
+ def test_provider_fw_rule_get_all(self):
+ self.assertExpected('provider_fw_rule_get_all')
+
+ def test_wait_for_instance_event(self):
+ self.assertExpected('wait_for_instance_event',
+ 'instance', ['event'])
+
+
+class FakeVirtAPITest(VirtAPIBaseTest):
+
+ cover_api = fake.FakeVirtAPI
+
+ def set_up_virtapi(self):
+ self.virtapi = fake.FakeVirtAPI()
+
+ def assertExpected(self, method, *args, **kwargs):
+ if method == 'wait_for_instance_event':
+ run = False
+ with self.virtapi.wait_for_instance_event(*args, **kwargs):
+ run = True
+ self.assertTrue(run)
+ return
+
+ self.mox.StubOutWithMock(db, method)
+
+ if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
+ 'security_group_rule_get_by_security_group'):
+ # NOTE(danms): FakeVirtAPI will convert the first argument to
+ # argument['id'], so expect that in the actual db call
+ e_args = tuple([args[0]['id']] + list(args[1:]))
+ elif method == 'security_group_get_by_instance':
+ e_args = tuple([args[0]['uuid']] + list(args[1:]))
+ else:
+ e_args = args
+
+ getattr(db, method)(self.context, *e_args, **kwargs).AndReturn(
+ 'it worked')
+ self.mox.ReplayAll()
+ result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
+ self.assertEqual(result, 'it worked')
+
+
+class FakeCompute(object):
+ def __init__(self):
+ self.conductor_api = mox.MockAnything()
+ self.db = mox.MockAnything()
+ self._events = []
+ self.instance_events = mock.MagicMock()
+ self.instance_events.prepare_for_instance_event.side_effect = \
+ self._prepare_for_instance_event
+
+ def _event_waiter(self):
+ event = mock.MagicMock()
+ event.status = 'completed'
+ return event
+
+ def _prepare_for_instance_event(self, instance, event_name):
+ m = mock.MagicMock()
+ m.instance = instance
+ m.event_name = event_name
+ m.wait.side_effect = self._event_waiter
+ self._events.append(m)
+ return m
+
+
+class ComputeVirtAPITest(VirtAPIBaseTest):
+
+ cover_api = compute_manager.ComputeVirtAPI
+
+ def set_up_virtapi(self):
+ self.compute = FakeCompute()
+ self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
+
+ def assertExpected(self, method, *args, **kwargs):
+ self.mox.StubOutWithMock(self.compute.conductor_api, method)
+ getattr(self.compute.conductor_api, method)(
+ self.context, *args, **kwargs).AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
+ self.assertEqual(result, 'it worked')
+
+ def test_wait_for_instance_event(self):
+ and_i_ran = ''
+ event_1_tag = objects.InstanceExternalEvent.make_key(
+ 'event1')
+ event_2_tag = objects.InstanceExternalEvent.make_key(
+ 'event2', 'tag')
+ events = {
+ 'event1': event_1_tag,
+ ('event2', 'tag'): event_2_tag,
+ }
+ with self.virtapi.wait_for_instance_event('instance', events.keys()):
+ and_i_ran = 'I ran so far a-waa-y'
+
+ self.assertEqual('I ran so far a-waa-y', and_i_ran)
+ self.assertEqual(2, len(self.compute._events))
+ for event in self.compute._events:
+ self.assertEqual('instance', event.instance)
+ self.assertIn(event.event_name, events.values())
+ event.wait.assert_called_once_with()
+
+ def test_wait_for_instance_event_failed(self):
+ def _failer():
+ event = mock.MagicMock()
+ event.status = 'failed'
+ return event
+
+ @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
+ def do_test():
+ with self.virtapi.wait_for_instance_event('instance', ['foo']):
+ pass
+
+ self.assertRaises(exception.NovaException, do_test)
+
+ def test_wait_for_instance_event_failed_callback(self):
+ def _failer():
+ event = mock.MagicMock()
+ event.status = 'failed'
+ return event
+
+ @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
+ def do_test():
+ callback = mock.MagicMock()
+ with self.virtapi.wait_for_instance_event('instance', ['foo'],
+ error_callback=callback):
+ pass
+ callback.assert_called_with('foo', 'instance')
+
+ do_test()
+
+ def test_wait_for_instance_event_timeout(self):
+ class TestException(Exception):
+ pass
+
+ def _failer():
+ raise TestException()
+
+ @mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
+ @mock.patch('eventlet.timeout.Timeout')
+ def do_test(timeout):
+ with self.virtapi.wait_for_instance_event('instance', ['foo']):
+ pass
+
+ self.assertRaises(TestException, do_test)
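
ComputeVirtAPITest exercises wait_for_instance_event() as a bare context manager; in a real driver it wraps the operations that trigger the awaited events. A rough usage sketch follows, where plug_vifs and boot are placeholder callables and 'network-vif-plugged' is used only as a typical event name; the error_callback keyword is the one shown in the failed-callback test above.

def spawn_with_vif_events(virtapi, instance, plug_vifs, boot):
    events = [('network-vif-plugged', 'port-id-1')]
    # Register interest before plugging so the event cannot be missed;
    # __exit__ then blocks until every event arrives, and a failure or
    # timeout surfaces as an exception (see the failed/timeout tests).
    with virtapi.wait_for_instance_event(instance, events,
                                         error_callback=None):
        plug_vifs(instance)
        boot(instance)
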
diff --git a/nova/tests/unit/compute/test_vmmode.py b/nova/tests/unit/compute/test_vmmode.py
new file mode 100644
index 0000000000..67475ecbb3
--- /dev/null
+++ b/nova/tests/unit/compute/test_vmmode.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import vm_mode
+from nova import exception
+from nova import test
+
+
+class ComputeVMModeTest(test.NoDBTestCase):
+
+ def test_case(self):
+ inst = dict(vm_mode="HVM")
+ mode = vm_mode.get_from_instance(inst)
+ self.assertEqual(mode, "hvm")
+
+ def test_legacy_pv(self):
+ inst = dict(vm_mode="pv")
+ mode = vm_mode.get_from_instance(inst)
+ self.assertEqual(mode, "xen")
+
+ def test_legacy_hv(self):
+ inst = dict(vm_mode="hv")
+ mode = vm_mode.get_from_instance(inst)
+ self.assertEqual(mode, "hvm")
+
+ def test_bogus(self):
+ inst = dict(vm_mode="wibble")
+ self.assertRaises(exception.Invalid,
+ vm_mode.get_from_instance,
+ inst)
+
+ def test_good(self):
+ inst = dict(vm_mode="hvm")
+ mode = vm_mode.get_from_instance(inst)
+ self.assertEqual(mode, "hvm")
+
+ def test_name_pv_compat(self):
+ mode = vm_mode.canonicalize('pv')
+ self.assertEqual(vm_mode.XEN, mode)
+
+ def test_name_hv_compat(self):
+ mode = vm_mode.canonicalize('hv')
+ self.assertEqual(vm_mode.HVM, mode)
+
+ def test_name_baremetal_compat(self):
+ mode = vm_mode.canonicalize('baremetal')
+ self.assertEqual(vm_mode.HVM, mode)
+
+ def test_name_hvm(self):
+ mode = vm_mode.canonicalize('hvm')
+ self.assertEqual(vm_mode.HVM, mode)
+
+ def test_name_none(self):
+ mode = vm_mode.canonicalize(None)
+ self.assertIsNone(mode)
+
+ def test_name_invalid(self):
+ self.assertRaises(exception.InvalidVirtualMachineMode,
+ vm_mode.canonicalize, 'invalid')
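
The alias handling these tests pin down can be summarised in a few lines. canonicalize_sketch below is an illustration of the observed behaviour, not the Nova implementation, which raises exception.InvalidVirtualMachineMode rather than ValueError; the 'uml' and 'exe' entries in the valid-mode tuple are an assumption, since the tests above only exercise hvm and xen.

_ALIASES = {'pv': 'xen', 'hv': 'hvm', 'baremetal': 'hvm'}
_VALID_MODES = ('hvm', 'xen', 'uml', 'exe')  # 'uml'/'exe' assumed, see above


def canonicalize_sketch(mode):
    # Lower-case the input, translate legacy aliases, reject the unknown.
    if mode is None:
        return None
    mode = _ALIASES.get(mode.lower(), mode.lower())
    if mode not in _VALID_MODES:
        raise ValueError('invalid vm_mode: %s' % mode)
    return mode


assert canonicalize_sketch('HVM') == 'hvm'
assert canonicalize_sketch('pv') == 'xen'
assert canonicalize_sketch('baremetal') == 'hvm'
assert canonicalize_sketch(None) is None
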
diff --git a/nova/tests/unit/conductor/__init__.py b/nova/tests/unit/conductor/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/conductor/__init__.py
diff --git a/nova/tests/unit/conductor/tasks/__init__.py b/nova/tests/unit/conductor/tasks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/conductor/tasks/__init__.py
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
new file mode 100644
index 0000000000..1d7c0340b9
--- /dev/null
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -0,0 +1,384 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+
+from nova.compute import power_state
+from nova.compute import utils as compute_utils
+from nova.conductor.tasks import live_migrate
+from nova import db
+from nova import exception
+from nova import objects
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import fake_instance
+
+
+class LiveMigrationTaskTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LiveMigrationTaskTestCase, self).setUp()
+ self.context = "context"
+ self.instance_host = "host"
+ self.instance_uuid = "uuid"
+ self.instance_image = "image_ref"
+ db_instance = fake_instance.fake_db_instance(
+ host=self.instance_host,
+ uuid=self.instance_uuid,
+ power_state=power_state.RUNNING,
+ memory_mb=512,
+ image_ref=self.instance_image)
+ self.instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_instance)
+ self.destination = "destination"
+ self.block_migration = "bm"
+ self.disk_over_commit = "doc"
+ self._generate_task()
+
+ def _generate_task(self):
+ self.task = live_migrate.LiveMigrationTask(self.context,
+ self.instance, self.destination, self.block_migration,
+ self.disk_over_commit)
+
+ def test_execute_with_destination(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task, '_check_requested_destination')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
+
+ self.task._check_host_is_up(self.instance_host)
+ self.task._check_requested_destination()
+ self.task.compute_rpcapi.live_migration(self.context,
+ host=self.instance_host,
+ instance=self.instance,
+ dest=self.destination,
+ block_migration=self.block_migration,
+ migrate_data=None).AndReturn("bob")
+
+ self.mox.ReplayAll()
+ self.assertEqual("bob", self.task.execute())
+
+ def test_execute_without_destination(self):
+ self.destination = None
+ self._generate_task()
+ self.assertIsNone(self.task.destination)
+
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task, '_find_destination')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
+
+ self.task._check_host_is_up(self.instance_host)
+ self.task._find_destination().AndReturn("found_host")
+ self.task.compute_rpcapi.live_migration(self.context,
+ host=self.instance_host,
+ instance=self.instance,
+ dest="found_host",
+ block_migration=self.block_migration,
+ migrate_data=None).AndReturn("bob")
+
+ self.mox.ReplayAll()
+ self.assertEqual("bob", self.task.execute())
+
+ def test_check_instance_is_running_passes(self):
+ self.task._check_instance_is_running()
+
+ def test_check_instance_is_running_fails_when_shutdown(self):
+ self.task.instance['power_state'] = power_state.SHUTDOWN
+ self.assertRaises(exception.InstanceNotRunning,
+ self.task._check_instance_is_running)
+
+ def test_check_instance_host_is_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(True)
+
+ self.mox.ReplayAll()
+ self.task._check_host_is_up("host")
+
+ def test_check_instance_host_is_up_fails_if_not_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(False)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_host_is_up, "host")
+
+ def test_check_instance_host_is_up_fails_if_not_found(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndRaise(exception.NotFound)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_host_is_up, "host")
+
+ def test_check_requested_destination(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi,
+ 'check_can_live_migrate_destination')
+
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(True)
+ hypervisor_details = {
+ "hypervisor_type": "a",
+ "hypervisor_version": 6.1,
+ "free_ram_mb": 513
+ }
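+ # A sketch of why three lookups are recorded below (inferred from the
+ # sibling tests): one for the destination memory check, then one each for
+ # the source and destination hosts when comparing hypervisor type/version.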
+ self.task._get_compute_info(self.destination)\
+ .AndReturn(hypervisor_details)
+ self.task._get_compute_info(self.instance_host)\
+ .AndReturn(hypervisor_details)
+ self.task._get_compute_info(self.destination)\
+ .AndReturn(hypervisor_details)
+
+ self.task.compute_rpcapi.check_can_live_migrate_destination(
+ self.context, self.instance, self.destination,
+ self.block_migration, self.disk_over_commit).AndReturn(
+ "migrate_data")
+
+ self.mox.ReplayAll()
+ self.task._check_requested_destination()
+ self.assertEqual("migrate_data", self.task.migrate_data)
+
+ def test_check_requested_destination_fails_with_same_dest(self):
+ self.task.destination = "same"
+ self.task.source = "same"
+ self.assertRaises(exception.UnableToMigrateToSelf,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_when_destination_is_not_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndRaise(exception.NotFound)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_not_enough_memory(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ self.task._check_host_is_up(self.destination)
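+ # The instance built in setUp() has memory_mb=512, so a destination
+ # reporting only 511 MB free must fail the pre-check.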
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndReturn({
+ "compute_node": [{"free_ram_mb": 511}]
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.MigrationPreCheckError,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_hypervisor_diff(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task,
+ '_check_destination_has_enough_memory')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+
+ self.task._check_host_is_up(self.destination)
+ self.task._check_destination_has_enough_memory()
+ self.task._get_compute_info(self.instance_host).AndReturn({
+ "hypervisor_type": "b"
+ })
+ self.task._get_compute_info(self.destination).AndReturn({
+ "hypervisor_type": "a"
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidHypervisorType,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_hypervisor_too_old(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task,
+ '_check_destination_has_enough_memory')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+
+ self.task._check_host_is_up(self.destination)
+ self.task._check_destination_has_enough_memory()
+ self.task._get_compute_info(self.instance_host).AndReturn({
+ "hypervisor_type": "a",
+ "hypervisor_version": 7
+ })
+ self.task._get_compute_info(self.destination).AndReturn({
+ "hypervisor_type": "a",
+ "hypervisor_version": 6
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_requested_destination)
+
+ def test_find_destination_works(self):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host1", self.task._find_destination())
+
+ def test_find_destination_no_image_works(self):
+ self.instance['image_ref'] = ''
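+ # With an empty image_ref no image metadata lookup is expected, and
+ # build_request_spec receives None for the image.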
+
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ scheduler_utils.build_request_spec(self.context, None,
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host1", self.task._find_destination())
+
+ def _test_find_destination_retry_hypervisor_raises(self, error):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")\
+ .AndRaise(error)
+
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host2'}])
+ self.task._check_compatible_with_source_hypervisor("host2")
+ self.task._call_livem_checks_on_host("host2")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host2", self.task._find_destination())
+
+ def test_find_destination_retry_with_old_hypervisor(self):
+ self._test_find_destination_retry_hypervisor_raises(
+ exception.DestinationHypervisorTooOld)
+
+ def test_find_destination_retry_with_invalid_hypervisor_type(self):
+ self._test_find_destination_retry_hypervisor_raises(
+ exception.InvalidHypervisorType)
+
+ def test_find_destination_retry_with_invalid_livem_checks(self):
+ self.flags(migrate_max_retries=1)
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")\
+ .AndRaise(exception.Invalid)
+
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host2'}])
+ self.task._check_compatible_with_source_hypervisor("host2")
+ self.task._call_livem_checks_on_host("host2")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host2", self.task._find_destination())
+
+ def test_find_destination_retry_exceeds_max(self):
+ self.flags(migrate_max_retries=0)
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")\
+ .AndRaise(exception.DestinationHypervisorTooOld)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost, self.task._find_destination)
+
+ def test_find_destination_when_runs_out_of_hosts(self):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
+ exception.NoValidHost(reason=""))
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost, self.task._find_destination)
+
+ def test_not_implemented_rollback(self):
+ self.assertRaises(NotImplementedError, self.task.rollback)
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
new file mode 100644
index 0000000000..0570ada217
--- /dev/null
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -0,0 +1,2151 @@
+# Copyright 2012 IBM Corp.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the conductor service."""
+
+import contextlib
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.api.ec2 import ec2utils
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import conductor
+from nova.conductor import api as conductor_api
+from nova.conductor import manager as conductor_manager
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova.conductor.tasks import live_migrate
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception as exc
+from nova import notifications
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import block_device as block_device_obj
+from nova.objects import fields
+from nova.objects import quotas as quotas_obj
+from nova import quota
+from nova import rpc
+from nova.scheduler import driver as scheduler_driver
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit import fake_utils
+from nova import utils
+
+
+CONF = cfg.CONF
+CONF.import_opt('report_interval', 'nova.service')
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+
+class FakeContext(context.RequestContext):
+ def elevated(self):
+ """Return a consistent elevated context so we can detect it."""
+ if not hasattr(self, '_elevated'):
+ self._elevated = super(FakeContext, self).elevated()
+ return self._elevated
+
+
+class _BaseTestCase(object):
+ def setUp(self):
+ super(_BaseTestCase, self).setUp()
+ self.db = None
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = FakeContext(self.user_id, self.project_id)
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ def fake_deserialize_context(serializer, ctxt_dict):
+ self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
+ self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
+ return self.context
+
+ self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
+ fake_deserialize_context)
+
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny'):
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ type_id = flavors.get_flavor_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = arch.X86_64
+ inst['os_type'] = 'Linux'
+ inst['availability_zone'] = 'fake-az'
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+ def _do_update(self, instance_uuid, **updates):
+ return self.conductor.instance_update(self.context, instance_uuid,
+ updates, None)
+
+ def test_instance_update(self):
+ instance = self._create_fake_instance()
+ new_inst = self._do_update(instance['uuid'],
+ vm_state=vm_states.STOPPED)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+ self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+
+ def test_instance_update_invalid_key(self):
+ # NOTE(danms): the real DB API call ignores invalid keys
+ if self.db is None:
+ self.conductor = utils.ExceptionHelper(self.conductor)
+ self.assertRaises(KeyError,
+ self._do_update, 'any-uuid', foobar=1)
+
+ def test_migration_get_in_progress_by_host_and_node(self):
+ self.mox.StubOutWithMock(db,
+ 'migration_get_in_progress_by_host_and_node')
+ db.migration_get_in_progress_by_host_and_node(
+ self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.migration_get_in_progress_by_host_and_node(
+ self.context, 'fake-host', 'fake-node')
+ self.assertEqual(result, 'fake-result')
+
+ def test_aggregate_metadata_get_by_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
+ db.aggregate_metadata_get_by_host(self.context, 'host',
+ 'key').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.aggregate_metadata_get_by_host(self.context,
+ 'host', 'key')
+ self.assertEqual(result, 'result')
+
+ def test_bw_usage_update(self):
+ self.mox.StubOutWithMock(db, 'bw_usage_update')
+ self.mox.StubOutWithMock(db, 'bw_usage_get')
+
+ update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
+ get_args = (self.context, 'uuid', 0, 'mac')
+
+ db.bw_usage_update(*update_args, update_cells=True)
+ db.bw_usage_get(*get_args).AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.conductor.bw_usage_update(*update_args,
+ update_cells=True)
+ self.assertEqual(result, 'foo')
+
+ def test_provider_fw_rule_get_all(self):
+ fake_rules = ['a', 'b', 'c']
+ self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
+ db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
+ self.mox.ReplayAll()
+ result = self.conductor.provider_fw_rule_get_all(self.context)
+ self.assertEqual(result, fake_rules)
+
+ def test_block_device_mapping_get_all_by_instance(self):
+ fake_inst = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db,
+ 'block_device_mapping_get_all_by_instance')
+ db.block_device_mapping_get_all_by_instance(
+ self.context, fake_inst['uuid']).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.block_device_mapping_get_all_by_instance(
+ self.context, fake_inst, legacy=False)
+ self.assertEqual(result, 'fake-result')
+
+ def test_vol_usage_update(self):
+ self.mox.StubOutWithMock(db, 'vol_usage_update')
+ self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
+
+ fake_inst = {'uuid': 'fake-uuid',
+ 'project_id': 'fake-project',
+ 'user_id': 'fake-user',
+ 'availability_zone': 'fake-az',
+ }
+
+ db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
+ fake_inst['uuid'],
+ fake_inst['project_id'],
+ fake_inst['user_id'],
+ fake_inst['availability_zone'],
+ False).AndReturn('fake-usage')
+ compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
+
+ self.mox.ReplayAll()
+
+ self.conductor.vol_usage_update(self.context, 'fake-vol',
+ 22, 33, 44, 55, fake_inst, None, False)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('conductor.%s' % self.conductor_manager.host,
+ msg.publisher_id)
+ self.assertEqual('volume.usage', msg.event_type)
+ self.assertEqual('INFO', msg.priority)
+ self.assertEqual('fake-info', msg.payload)
+
+ def test_compute_node_create(self):
+ self.mox.StubOutWithMock(db, 'compute_node_create')
+ db.compute_node_create(self.context, 'fake-values').AndReturn(
+ 'fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.compute_node_create(self.context,
+ 'fake-values')
+ self.assertEqual(result, 'fake-result')
+
+ def test_compute_node_update(self):
+ node = {'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'compute_node_update')
+ db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
+ AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.compute_node_update(self.context, node,
+ {'fake': 'values'})
+ self.assertEqual(result, 'fake-result')
+
+ def test_compute_node_delete(self):
+ node = {'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'compute_node_delete')
+ db.compute_node_delete(self.context, node['id']).AndReturn(None)
+ self.mox.ReplayAll()
+ result = self.conductor.compute_node_delete(self.context, node)
+ self.assertIsNone(result)
+
+ def test_task_log_get(self):
+ self.mox.StubOutWithMock(db, 'task_log_get')
+ db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
+ 'state').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_get(self.context, 'task', 'begin',
+ 'end', 'host', 'state')
+ self.assertEqual(result, 'result')
+
+ def test_task_log_get_with_no_state(self):
+ self.mox.StubOutWithMock(db, 'task_log_get')
+ db.task_log_get(self.context, 'task', 'begin', 'end',
+ 'host', None).AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_get(self.context, 'task', 'begin',
+ 'end', 'host', None)
+ self.assertEqual(result, 'result')
+
+ def test_task_log_begin_task(self):
+ self.mox.StubOutWithMock(db, 'task_log_begin_task')
+ db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
+ 'end', 'host', 'items',
+ 'message').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_begin_task(
+ self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
+ self.assertEqual(result, 'result')
+
+ def test_task_log_end_task(self):
+ self.mox.StubOutWithMock(db, 'task_log_end_task')
+ db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
+ 'host', 'errors', 'message').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_end_task(
+ self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
+ self.assertEqual(result, 'result')
+
+ def test_notify_usage_exists(self):
+ info = {
+ 'audit_period_beginning': 'start',
+ 'audit_period_ending': 'end',
+ 'bandwidth': 'bw_usage',
+ 'image_meta': {},
+ 'extra': 'info',
+ }
+ instance = {
+ 'system_metadata': [],
+ }
+
+ self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
+ self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
+ self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
+
+ notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
+ notifications.bandwidth_usage(instance, 'start', True).AndReturn(
+ 'bw_usage')
+ notifier = self.conductor_manager.notifier
+ compute_utils.notify_about_instance_usage(notifier,
+ self.context, instance,
+ 'exists',
+ system_metadata={},
+ extra_usage_info=info)
+
+ self.mox.ReplayAll()
+
+ self.conductor.notify_usage_exists(self.context, instance, False, True,
+ system_metadata={},
+ extra_usage_info=dict(extra='info'))
+
+ def test_security_groups_trigger_members_refresh(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_members_refresh')
+ self.conductor_manager.security_group_api.trigger_members_refresh(
+ self.context, [1, 2, 3])
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_members_refresh(self.context,
+ [1, 2, 3])
+
+ def test_get_ec2_ids(self):
+ expected = {
+ 'instance-id': 'ec2-inst-id',
+ 'ami-id': 'ec2-ami-id',
+ 'kernel-id': 'ami-kernel-ec2-kernelid',
+ 'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
+ }
+ inst = {
+ 'uuid': 'fake-uuid',
+ 'kernel_id': 'ec2-kernelid',
+ 'ramdisk_id': 'ec2-ramdiskid',
+ 'image_ref': 'fake-image',
+ }
+ self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
+ self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
+ self.mox.StubOutWithMock(ec2utils, 'image_type')
+
+ ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
+ expected['instance-id'])
+ ec2utils.glance_id_to_ec2_id(self.context,
+ inst['image_ref']).AndReturn(
+ expected['ami-id'])
+ for image_type in ['kernel', 'ramdisk']:
+ image_id = inst['%s_id' % image_type]
+ ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
+ ec2utils.glance_id_to_ec2_id(self.context, image_id,
+ 'ami-' + image_type).AndReturn(
+ 'ami-%s-ec2-%sid' % (image_type, image_type))
+
+ self.mox.ReplayAll()
+ result = self.conductor.get_ec2_ids(self.context, inst)
+ self.assertEqual(result, expected)
+
+
+class ConductorTestCase(_BaseTestCase, test.TestCase):
+ """Conductor Manager Tests."""
+ def setUp(self):
+ super(ConductorTestCase, self).setUp()
+ self.conductor = conductor_manager.ConductorManager()
+ self.conductor_manager = self.conductor
+
+ def test_instance_get_by_uuid(self):
+ orig_instance = self._create_fake_instance()
+ copy_instance = self.conductor.instance_get_by_uuid(
+ self.context, orig_instance['uuid'], None)
+ self.assertEqual(orig_instance['name'],
+ copy_instance['name'])
+
+ def test_block_device_mapping_update_or_create(self):
+ fake_bdm = {'id': 1, 'device_name': 'foo',
+ 'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume'}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
+ fake_bdm2 = {'id': 1, 'device_name': 'foo2',
+ 'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume'}
+ fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
+ cells_rpcapi = self.conductor.cells_rpcapi
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(cells_rpcapi,
+ 'bdm_update_or_create_at_top')
+ db.block_device_mapping_create(self.context,
+ fake_bdm).AndReturn(fake_bdm2)
+ cells_rpcapi.bdm_update_or_create_at_top(
+ self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
+ create=True)
+ db.block_device_mapping_update(self.context, fake_bdm['id'],
+ fake_bdm).AndReturn(fake_bdm2)
+ cells_rpcapi.bdm_update_or_create_at_top(
+ self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
+ create=False)
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=True)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=False)
+
+ def test_instance_get_all_by_filters(self):
+ filters = {'foo': 'bar'}
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort',
+ columns_to_join=None, use_slave=False)
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort',
+ None, False)
+
+ def test_instance_get_all_by_filters_use_slave(self):
+ filters = {'foo': 'bar'}
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort',
+ columns_to_join=None, use_slave=True)
+ self.mox.ReplayAll()
+ self.conductor.instance_get_all_by_filters(self.context, filters,
+ 'fake-key', 'fake-sort',
+ columns_to_join=None,
+ use_slave=True)
+
+ def test_instance_get_all_by_host(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
+ db.instance_get_all_by_host(self.context.elevated(),
+ 'host', None).AndReturn('result')
+ db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
+ 'node').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_get_all_by_host(self.context, 'host',
+ None, None)
+ self.assertEqual(result, 'result')
+ result = self.conductor.instance_get_all_by_host(self.context, 'host',
+ 'node', None)
+ self.assertEqual(result, 'result')
+
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False, db_exception=None):
+
+ self.mox.StubOutWithMock(db, name)
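+ # When a DB exception is expected the call is recorded twice: the first
+ # invocation verifies the manager wraps it in messaging.ExpectedException,
+ # and the second (via ExceptionHelper) re-raises the original exception.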
+ if db_exception:
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ else:
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ if db_exception:
+ self.assertRaises(messaging.ExpectedException,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
+
+ self.conductor = utils.ExceptionHelper(self.conductor)
+
+ self.assertRaises(db_exception.__class__,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
+ else:
+ result = self.conductor.service_get_all_by(self.context,
+ **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (),
+ dict(host=None, topic=None, binary=None))
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host', binary=None))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic', host=None, binary=None))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host', topic=None, binary=None))
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host', binary=None),
+ db_result_listified=True)
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary', topic=None))
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host', binary=None),
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary', topic=None),
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'args')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', ['args'])
+
+ def _test_object_action(self, is_classmethod, raise_exception):
+ class TestObject(obj_base.NovaObject):
+ def foo(self, context, raise_exception=False):
+ if raise_exception:
+ raise Exception('test')
+ else:
+ return 'test'
+
+ @classmethod
+ def bar(cls, context, raise_exception=False):
+ if raise_exception:
+ raise Exception('test')
+ else:
+ return 'test'
+
+ obj = TestObject()
+ if is_classmethod:
+ result = self.conductor.object_class_action(
+ self.context, TestObject.obj_name(), 'bar', '1.0',
+ tuple(), {'raise_exception': raise_exception})
+ else:
+ updates, result = self.conductor.object_action(
+ self.context, obj, 'foo', tuple(),
+ {'raise_exception': raise_exception})
+ self.assertEqual('test', result)
+
+ def test_object_action(self):
+ self._test_object_action(False, False)
+
+ def test_object_action_on_raise(self):
+ self.assertRaises(messaging.ExpectedException,
+ self._test_object_action, False, True)
+
+ def test_object_class_action(self):
+ self._test_object_action(True, False)
+
+ def test_object_class_action_on_raise(self):
+ self.assertRaises(messaging.ExpectedException,
+ self._test_object_action, True, True)
+
+ def test_object_action_copies_object(self):
+ class TestObject(obj_base.NovaObject):
+ fields = {'dict': fields.DictOfStringsField()}
+
+ def touch_dict(self, context):
+ self.dict['foo'] = 'bar'
+ self.obj_reset_changes()
+
+ obj = TestObject()
+ obj.dict = {}
+ obj.obj_reset_changes()
+ updates, result = self.conductor.object_action(
+ self.context, obj, 'touch_dict', tuple(), {})
+ # NOTE(danms): If conductor did not properly copy the object, then
+ # the new and reference copies of the nested dict object will be
+ # the same, and thus 'dict' will not be reported as changed
+ self.assertIn('dict', updates)
+ self.assertEqual({'foo': 'bar'}, updates['dict'])
+
+ def _test_expected_exceptions(self, db_method, conductor_method, errors,
+ *args, **kwargs):
+ # Tests that expected exceptions are handled properly.
+ for error in errors:
+ with mock.patch.object(db, db_method, side_effect=error):
+ self.assertRaises(messaging.ExpectedException,
+ conductor_method,
+ self.context, *args, **kwargs)
+
+ def test_action_event_start_expected_exceptions(self):
+ error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
+ self._test_expected_exceptions(
+ 'action_event_start', self.conductor.action_event_start, [error],
+ {'foo': 'bar'})
+
+ def test_action_event_finish_expected_exceptions(self):
+ errors = (exc.InstanceActionNotFound(request_id='1',
+ instance_uuid='2'),
+ exc.InstanceActionEventNotFound(event='1', action_id='2'))
+ self._test_expected_exceptions(
+ 'action_event_finish', self.conductor.action_event_finish,
+ errors, {'foo': 'bar'})
+
+ def test_instance_update_expected_exceptions(self):
+ errors = (exc.InvalidUUID(uuid='foo'),
+ exc.InstanceNotFound(instance_id=1),
+ exc.UnexpectedTaskStateError(expected='foo',
+ actual='bar'))
+ self._test_expected_exceptions(
+ 'instance_update', self.conductor.instance_update,
+ errors, None, {'foo': 'bar'}, None)
+
+ def test_instance_get_by_uuid_expected_exceptions(self):
+ error = exc.InstanceNotFound(instance_id=1)
+ self._test_expected_exceptions(
+ 'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
+ [error], None, [])
+
+ def test_aggregate_host_add_expected_exceptions(self):
+ error = exc.AggregateHostExists(aggregate_id=1, host='foo')
+ self._test_expected_exceptions(
+ 'aggregate_host_add', self.conductor.aggregate_host_add,
+ [error], {'id': 1}, None)
+
+ def test_aggregate_host_delete_expected_exceptions(self):
+ error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
+ self._test_expected_exceptions(
+ 'aggregate_host_delete', self.conductor.aggregate_host_delete,
+ [error], {'id': 1}, None)
+
+ def test_service_update_expected_exceptions(self):
+ error = exc.ServiceNotFound(service_id=1)
+ self._test_expected_exceptions(
+ 'service_update',
+ self.conductor.service_update,
+ [error], {'id': 1}, None)
+
+ def test_service_destroy_expected_exceptions(self):
+ error = exc.ServiceNotFound(service_id=1)
+ self._test_expected_exceptions(
+ 'service_destroy',
+ self.conductor.service_destroy,
+ [error], 1)
+
+ def _setup_aggregate_with_host(self):
+ aggregate_ref = db.aggregate_create(self.context.elevated(),
+ {'name': 'foo'}, metadata={'availability_zone': 'foo'})
+
+ self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
+
+ aggregate_ref = db.aggregate_get(self.context.elevated(),
+ aggregate_ref['id'])
+
+ return aggregate_ref
+
+ def test_aggregate_host_add(self):
+ aggregate_ref = self._setup_aggregate_with_host()
+
+ self.assertIn('bar', aggregate_ref['hosts'])
+
+ db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
+
+ def test_aggregate_host_delete(self):
+ aggregate_ref = self._setup_aggregate_with_host()
+
+ self.conductor.aggregate_host_delete(self.context, aggregate_ref,
+ 'bar')
+
+ aggregate_ref = db.aggregate_get(self.context.elevated(),
+ aggregate_ref['id'])
+
+ self.assertNotIn('bar', aggregate_ref['hosts'])
+
+ db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
+
+ def test_network_migrate_instance_start(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_start')
+ self.conductor_manager.network_api.migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+
+ def test_network_migrate_instance_finish(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_finish')
+ self.conductor_manager.network_api.migrate_instance_finish(
+ self.context, 'instance', 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_finish(self.context,
+ 'instance',
+ 'migration')
+
+ def test_instance_destroy(self):
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_destroy(self.context,
+ {'uuid': 'fake-uuid'})
+ self.assertEqual(result, 'fake-result')
+
+ def test_compute_unrescue(self):
+ self.mox.StubOutWithMock(self.conductor_manager.compute_api,
+ 'unrescue')
+ self.conductor_manager.compute_api.unrescue(self.context, 'instance')
+ self.mox.ReplayAll()
+ self.conductor.compute_unrescue(self.context, 'instance')
+
+ def test_instance_get_active_by_window_joined(self):
+ self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
+ db.instance_get_active_by_window_joined(self.context, 'fake-begin',
+ 'fake-end', 'fake-proj',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_active_by_window_joined(
+ self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
+
+ def test_instance_fault_create(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ db.instance_fault_create(self.context, 'fake-values').AndReturn(
+ 'fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_fault_create(self.context,
+ 'fake-values')
+ self.assertEqual(result, 'fake-result')
+
+ def test_action_event_start(self):
+ self.mox.StubOutWithMock(db, 'action_event_start')
+ db.action_event_start(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_start(self.context, {})
+
+ def test_action_event_finish(self):
+ self.mox.StubOutWithMock(db, 'action_event_finish')
+ db.action_event_finish(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_finish(self.context, {})
+
+ def test_agent_build_get_by_triple(self):
+ self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
+ db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
+ 'fake-arch').AndReturn('it worked')
+ self.mox.ReplayAll()
+ result = self.conductor.agent_build_get_by_triple(self.context,
+ 'fake-hv',
+ 'fake-os',
+ 'fake-arch')
+ self.assertEqual(result, 'it worked')
+
+
+class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
+ """Conductor RPC API Tests."""
+ def setUp(self):
+ super(ConductorRPCAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor_manager = self.conductor_service.manager
+ self.conductor = conductor_rpcapi.ConductorAPI()
+
+ def test_block_device_mapping_update_or_create(self):
+ fake_bdm = {'id': 'fake-id'}
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
+ '_from_db_object')
+ db.block_device_mapping_create(self.context, fake_bdm)
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update_or_create(self.context, fake_bdm)
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=True)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm,
+ create=False)
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ fake_bdm)
+
+ def _test_stubbed(self, name, dbargs, condargs,
+ db_result_listified=False, db_exception=None):
+ self.mox.StubOutWithMock(db, name)
+ if db_exception:
+ getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
+ else:
+ getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ if db_exception:
+ self.assertRaises(db_exception.__class__,
+ self.conductor.service_get_all_by,
+ self.context, **condargs)
+ else:
+ result = self.conductor.service_get_all_by(self.context,
+ **condargs)
+ if db_result_listified:
+ self.assertEqual(['fake-result'], result)
+ else:
+ self.assertEqual('fake-result', result)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all', (),
+ dict(topic=None, host=None, binary=None))
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic',
+ ('host', 'topic'),
+ dict(topic='topic', host='host', binary=None))
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic',
+ ('topic',),
+ dict(topic='topic', host=None, binary=None))
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host',
+ ('host',),
+ dict(host='host', topic=None, binary=None))
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host', binary=None),
+ db_result_listified=True)
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary', topic=None))
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host',
+ ('host',),
+ dict(topic='compute', host='host', binary=None),
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args',
+ ('host', 'binary'),
+ dict(host='host', binary='binary', topic=None),
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'arg')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', ['arg'])
+
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_time_big(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', 10)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with(timeout=9)
+
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_time_small(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', 3)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with(timeout=3)
+
+ @mock.patch.object(db, 'service_update')
+ @mock.patch('oslo.messaging.RPCClient.prepare')
+ def test_service_update_no_time(self, mock_prepare, mock_update):
+ CONF.set_override('report_interval', None)
+ services = {'id': 1}
+ self.conductor.service_update(self.context, services, {})
+ mock_prepare.assert_called_once_with()
+
+
+class ConductorAPITestCase(_BaseTestCase, test.TestCase):
+ """Conductor API Tests."""
+ def setUp(self):
+ super(ConductorAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.API()
+ self.conductor_manager = self.conductor_service.manager
+ self.db = None
+
+ def _do_update(self, instance_uuid, **updates):
+ # NOTE(danms): the public API takes actual keyword arguments,
+ # so override the base class here to make the call correctly
+ return self.conductor.instance_update(self.context, instance_uuid,
+ **updates)
+
+ def test_bw_usage_get(self):
+ self.mox.StubOutWithMock(db, 'bw_usage_update')
+ self.mox.StubOutWithMock(db, 'bw_usage_get')
+
+ get_args = (self.context, 'uuid', 0, 'mac')
+
+ db.bw_usage_get(*get_args).AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.conductor.bw_usage_get(*get_args)
+ self.assertEqual(result, 'foo')
+
+ def test_block_device_mapping_update_or_create(self):
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
+ '_from_db_object')
+ db.block_device_mapping_create(self.context, 'fake-bdm')
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update(self.context,
+ 'fake-id', {'id': 'fake-id'})
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
+ self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ 'fake-bdm')
+
+ def _test_stubbed(self, name, *args, **kwargs):
+ if args and isinstance(args[0], FakeContext):
+ ctxt = args[0]
+ args = args[1:]
+ else:
+ ctxt = self.context
+ db_exception = kwargs.get('db_exception')
+ self.mox.StubOutWithMock(db, name)
+ if db_exception:
+ getattr(db, name)(ctxt, *args).AndRaise(db_exception)
+ else:
+ getattr(db, name)(ctxt, *args).AndReturn('fake-result')
+ if name == 'service_destroy':
+ # TODO(russellb) This is a hack ... SetUp() starts the conductor()
+ # service. There is a cleanup step that runs after this test which
+ # also deletes the associated service record. This involves a call
+ # to db.service_destroy(), which we have stubbed out.
+ db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ if db_exception:
+ self.assertRaises(db_exception.__class__,
+ getattr(self.conductor, name),
+ self.context, *args)
+ else:
+ result = getattr(self.conductor, name)(self.context, *args)
+ self.assertEqual(
+ result, 'fake-result' if kwargs.get('returns', True) else None)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all')
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic', 'topic')
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host', 'host')
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary')
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host', 'host',
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary',
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
+ def test_service_create(self):
+ self._test_stubbed('service_create', {})
+
+ def test_service_destroy(self):
+ self._test_stubbed('service_destroy', '', returns=False)
+
+ def test_service_update(self):
+ ctxt = self.context
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(ctxt, '', {}).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_update(self.context, {'id': ''}, {})
+ self.assertEqual(result, 'fake-result')
+
+ def test_instance_get_all_by_host_and_node(self):
+ self._test_stubbed('instance_get_all_by_host_and_node',
+ self.context.elevated(), 'host', 'node')
+
+ def test_instance_get_all_by_host(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
+ db.instance_get_all_by_host(self.context.elevated(), 'host',
+ None).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_get_all_by_host(self.context,
+ 'host', None)
+ self.assertEqual(result, 'fake-result')
+
+ def test_wait_until_ready(self):
+ timeouts = []
+ calls = dict(count=0)
+
+ def fake_ping(context, message, timeout):
+ timeouts.append(timeout)
+ calls['count'] += 1
+ if calls['count'] < 15:
+ raise messaging.MessagingTimeout("fake")
+
+ self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
+
+ self.conductor.wait_until_ready(self.context)
+
+ self.assertEqual(timeouts.count(10), 10)
+ self.assertIn(None, timeouts)
+
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'arg')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', 'arg')
+
+
+class ConductorLocalAPITestCase(ConductorAPITestCase):
+ """Conductor LocalAPI Tests."""
+ def setUp(self):
+ super(ConductorLocalAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalAPI()
+ self.conductor_manager = self.conductor._manager._target
+ self.db = db
+
+ def test_client_exceptions(self):
+ instance = self._create_fake_instance()
+ # NOTE(danms): The LocalAPI should not raise exceptions wrapped
+ # in ClientException. KeyError should be raised if an invalid
+ # update key is passed, so use that to validate.
+ self.assertRaises(KeyError,
+ self._do_update, instance['uuid'], foo='bar')
+
+ def test_wait_until_ready(self):
+ # Override test in ConductorAPITestCase
+ pass
+
+
+class ConductorImportTest(test.TestCase):
+ def test_import_conductor_local(self):
+ self.flags(use_local=True, group='conductor')
+ self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
+ self.assertIsInstance(conductor.ComputeTaskAPI(),
+ conductor_api.LocalComputeTaskAPI)
+
+ def test_import_conductor_rpc(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertIsInstance(conductor.API(), conductor_api.API)
+ self.assertIsInstance(conductor.ComputeTaskAPI(),
+ conductor_api.ComputeTaskAPI)
+
+ def test_import_conductor_override_to_local(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertIsInstance(conductor.API(use_local=True),
+ conductor_api.LocalAPI)
+ self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
+ conductor_api.LocalComputeTaskAPI)
+
+
+class ConductorPolicyTest(test.TestCase):
+ def test_all_allowed_keys(self):
+
+ def fake_db_instance_update(self, *args, **kwargs):
+ return None, None
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ fake_db_instance_update)
+
+ ctxt = context.RequestContext('fake-user', 'fake-project')
+ conductor = conductor_api.LocalAPI()
+ updates = {}
+ for key in conductor_manager.allowed_updates:
+ if key in conductor_manager.datetime_fields:
+ updates[key] = timeutils.utcnow()
+ else:
+ updates[key] = 'foo'
+ conductor.instance_update(ctxt, 'fake-instance', **updates)
+
+ def test_allowed_keys_are_real(self):
+ instance = models.Instance()
+ keys = list(conductor_manager.allowed_updates)
+
+ # NOTE(danms): expected_task_state is a parameter that gets
+ # passed to the db layer, but is not actually an instance attribute
+ del keys[keys.index('expected_task_state')]
+
+ for key in keys:
+ self.assertTrue(hasattr(instance, key))
+
+
+class _BaseTaskTestCase(object):
+ def setUp(self):
+ super(_BaseTaskTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = FakeContext(self.user_id, self.project_id)
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def fake_deserialize_context(serializer, ctxt_dict):
+ self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
+ self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
+ return self.context
+
+ self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
+ fake_deserialize_context)
+
+ def _prepare_rebuild_args(self, update_args=None):
+ rebuild_args = {'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': 'image_ref',
+ 'orig_image_ref': 'orig_image_ref',
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'host': 'compute-host'}
+ if update_args:
+ rebuild_args.update(update_args)
+ return rebuild_args
+
+ def test_live_migrate(self):
+ inst = fake_instance.fake_db_instance()
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst, [])
+
+ self.mox.StubOutWithMock(live_migrate, 'execute')
+ live_migrate.execute(self.context,
+ mox.IsA(objects.Instance),
+ 'destination',
+ 'block_migration',
+ 'disk_over_commit')
+ self.mox.ReplayAll()
+
+ if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
+ conductor_api.LocalComputeTaskAPI)):
+ # The API method is actually 'live_migrate_instance'. It gets
+ # converted into 'migrate_server' when doing RPC.
+ self.conductor.live_migrate_instance(self.context, inst_obj,
+ 'destination', 'block_migration', 'disk_over_commit')
+ else:
+ self.conductor.migrate_server(self.context, inst_obj,
+ {'host': 'destination'}, True, False, None,
+ 'block_migration', 'disk_over_commit')
+
+ def test_cold_migrate(self):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(
+ self.conductor_manager.compute_rpcapi, 'prep_resize')
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+ inst = fake_instance.fake_db_instance(image_ref='image_ref')
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst, [])
+ flavor = flavors.get_default_flavor()
+ flavor['extra_specs'] = 'extra_specs'
+ request_spec = {'instance_type': flavor,
+ 'instance_properties': {}}
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
+
+ scheduler_utils.build_request_spec(
+ self.context, 'image',
+ [mox.IsA(objects.Instance)],
+ instance_type=flavor).AndReturn(request_spec)
+
+ hosts = [dict(host='host1', nodename=None, limits={})]
+ self.conductor_manager.scheduler_client.select_destinations(
+ self.context, request_spec,
+ {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
+
+ filter_properties = {'limits': {},
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host1', None]]}}
+
+ self.conductor_manager.compute_rpcapi.prep_resize(
+ self.context, 'image', mox.IsA(objects.Instance),
+ mox.IsA(dict), 'host1', [], request_spec=request_spec,
+ filter_properties=filter_properties, node=None)
+
+ self.mox.ReplayAll()
+
+ scheduler_hint = {'filter_properties': {}}
+
+ if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
+ conductor_api.LocalComputeTaskAPI)):
+ # The API method is actually 'resize_instance'. It gets
+ # converted into 'migrate_server' when doing RPC.
+ self.conductor.resize_instance(
+ self.context, inst_obj, {}, scheduler_hint, flavor, [])
+ else:
+ self.conductor.migrate_server(
+ self.context, inst_obj, scheduler_hint,
+ False, False, flavor, None, None, [])
+
+ def test_build_instances(self):
+ system_metadata = flavors.save_flavor_info({},
+ flavors.get_default_flavor())
+ instances = [fake_instance.fake_instance_obj(
+ self.context,
+ system_metadata=system_metadata,
+ expected_attrs=['system_metadata']) for i in xrange(2)]
+ instance_type = flavors.extract_flavor(instances[0])
+ instance_type['extra_specs'] = 'fake-specs'
+ instance_properties = jsonutils.to_primitive(instances[0])
+
+ self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
+ self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'build_and_run_instance')
+
+ db.flavor_extra_specs_get(
+ self.context,
+ instance_type['flavorid']).AndReturn('fake-specs')
+ scheduler_utils.setup_instance_group(self.context, None, None)
+ self.conductor_manager.scheduler_client.select_destinations(
+ self.context, {'image': {'fake_data': 'should_pass_silently'},
+ 'instance_properties': jsonutils.to_primitive(
+ instances[0]),
+ 'instance_type': instance_type,
+ 'instance_uuids': [inst.uuid for inst in instances],
+ 'num_instances': 2},
+ {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
+ [{'host': 'host1', 'nodename': 'node1', 'limits': []},
+ {'host': 'host2', 'nodename': 'node2', 'limits': []}])
+ db.instance_get_by_uuid(self.context, instances[0].uuid,
+ columns_to_join=['system_metadata'],
+ use_slave=False).AndReturn(
+ jsonutils.to_primitive(instances[0]))
+ db.block_device_mapping_get_all_by_instance(self.context,
+ instances[0].uuid, use_slave=False).AndReturn([])
+ self.conductor_manager.compute_rpcapi.build_and_run_instance(
+ self.context,
+ instance=mox.IgnoreArg(),
+ host='host1',
+ image={'fake_data': 'should_pass_silently'},
+ request_spec={
+ 'image': {'fake_data': 'should_pass_silently'},
+ 'instance_properties': instance_properties,
+ 'instance_type': instance_type,
+ 'instance_uuids': [inst.uuid for inst in instances],
+ 'num_instances': 2},
+ filter_properties={'retry': {'num_attempts': 1,
+ 'hosts': [['host1', 'node1']]},
+ 'limits': []},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mox.IgnoreArg(),
+ node='node1', limits=[])
+ db.instance_get_by_uuid(self.context, instances[1].uuid,
+ columns_to_join=['system_metadata'],
+ use_slave=False).AndReturn(
+ jsonutils.to_primitive(instances[1]))
+ db.block_device_mapping_get_all_by_instance(self.context,
+ instances[1].uuid, use_slave=False).AndReturn([])
+ self.conductor_manager.compute_rpcapi.build_and_run_instance(
+ self.context,
+ instance=mox.IgnoreArg(),
+ host='host2',
+ image={'fake_data': 'should_pass_silently'},
+ request_spec={
+ 'image': {'fake_data': 'should_pass_silently'},
+ 'instance_properties': instance_properties,
+ 'instance_type': instance_type,
+ 'instance_uuids': [inst.uuid for inst in instances],
+ 'num_instances': 2},
+ filter_properties={'limits': [],
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host2', 'node2']]}},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mox.IgnoreArg(),
+ node='node2', limits=[])
+ self.mox.ReplayAll()
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image={'fake_data': 'should_pass_silently'},
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ def test_build_instances_scheduler_failure(self):
+ instances = [fake_instance.fake_instance_obj(self.context)
+ for i in xrange(2)]
+ image = {'fake-data': 'should_pass_silently'}
+ spec = {'fake': 'specs',
+ 'instance_properties': instances[0]}
+ exception = exc.NoValidHost(reason='fake-reason')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
+ self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+
+ scheduler_utils.build_request_spec(self.context, image,
+ mox.IgnoreArg()).AndReturn(spec)
+ scheduler_utils.setup_instance_group(self.context, None, None)
+ self.conductor_manager.scheduler_client.select_destinations(
+ self.context, spec,
+ {'retry': {'num_attempts': 1,
+ 'hosts': []}}).AndRaise(exception)
+ for instance in instances:
+ scheduler_driver.handle_schedule_error(self.context, exception,
+ instance.uuid, spec)
+ self.mox.ReplayAll()
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image=image,
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ def test_unshelve_instance_on_host(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'], expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ system_metadata = instance.system_metadata
+
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'start_instance')
+ self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'unshelve_instance')
+
+ self.conductor_manager.compute_rpcapi.start_instance(self.context,
+ instance)
+ self.conductor_manager._delete_image(self.context,
+ 'fake_image_id')
+ self.mox.ReplayAll()
+
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_image_id'] = 'fake_image_id'
+ system_metadata['shelved_host'] = 'fake-mini'
+ self.conductor_manager.unshelve_instance(self.context, instance)
+
+ def test_unshelve_offloaded_instance_glance_image_not_found(self):
+ shelved_image_id = "image_not_found"
+
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(
+ self.context,
+ db_instance['uuid'],
+ expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ system_metadata = instance.system_metadata
+
+ self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
+
+ e = exc.ImageNotFound(image_id=shelved_image_id)
+ self.conductor_manager.image_api.get(
+ self.context, shelved_image_id, show_deleted=False).AndRaise(e)
+ self.mox.ReplayAll()
+
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_host'] = 'fake-mini'
+ system_metadata['shelved_image_id'] = shelved_image_id
+
+ self.assertRaises(
+ exc.UnshelveException,
+ self.conductor_manager.unshelve_instance,
+ self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.ERROR)
+
+ def test_unshelve_offloaded_instance_image_id_is_none(self):
+ db_instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance = objects.Instance.get_by_uuid(
+ self.context,
+ db_instance['uuid'],
+ expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.task_state = task_states.UNSHELVING
+ system_metadata = instance.system_metadata
+ system_metadata['shelved_image_id'] = None
+ instance.save()
+
+ self.assertRaises(
+ exc.UnshelveException,
+ self.conductor_manager.unshelve_instance,
+ self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.ERROR)
+
+ def test_unshelve_instance_schedule_and_rebuild(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'], expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.save()
+ filter_properties = {}
+ system_metadata = instance.system_metadata
+
+ self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
+ self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'unshelve_instance')
+
+ self.conductor_manager.image_api.get(self.context,
+ 'fake_image_id', show_deleted=False).AndReturn('fake_image')
+ self.conductor_manager._schedule_instances(self.context,
+ 'fake_image', filter_properties, instance).AndReturn(
+ [{'host': 'fake_host',
+ 'nodename': 'fake_node',
+ 'limits': {}}])
+ self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
+ instance, 'fake_host', image='fake_image',
+ filter_properties={'limits': {}}, node='fake_node')
+ self.mox.ReplayAll()
+
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_image_id'] = 'fake_image_id'
+ system_metadata['shelved_host'] = 'fake-mini'
+ self.conductor_manager.unshelve_instance(self.context, instance)
+
+ def test_unshelve_instance_schedule_and_rebuild_no_valid_host(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'], expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.save()
+ system_metadata = instance.system_metadata
+
+ def fake_schedule_instances(context, image, filter_properties,
+ *instances):
+ raise exc.NoValidHost(reason='')
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.image_api, 'get',
+ return_value='fake_image'),
+ mock.patch.object(self.conductor_manager, '_schedule_instances',
+ fake_schedule_instances)
+ ) as (_get_image, _schedule_instances):
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_image_id'] = 'fake_image_id'
+ system_metadata['shelved_host'] = 'fake-mini'
+ self.conductor_manager.unshelve_instance(self.context, instance)
+ _get_image.assert_has_calls([mock.call(self.context,
+ system_metadata['shelved_image_id'],
+ show_deleted=False)])
+ self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
+
+ def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'], expected_attrs=['system_metadata'])
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.save()
+ filter_properties = {}
+ system_metadata = instance.system_metadata
+
+ self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
+ self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'unshelve_instance')
+
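+ # Returning None from the image API simulates a volume-backed instance
+ # with no shelved image, so unshelve is scheduled with image=None.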
+ self.conductor_manager.image_api.get(self.context,
+ 'fake_image_id', show_deleted=False).AndReturn(None)
+ self.conductor_manager._schedule_instances(self.context,
+ None, filter_properties, instance).AndReturn(
+ [{'host': 'fake_host',
+ 'nodename': 'fake_node',
+ 'limits': {}}])
+ self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
+ instance, 'fake_host', image=None,
+ filter_properties={'limits': {}}, node='fake_node')
+ self.mox.ReplayAll()
+
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_image_id'] = 'fake_image_id'
+ system_metadata['shelved_host'] = 'fake-mini'
+ self.conductor_manager.unshelve_instance(self.context, instance)
+
+ def test_rebuild_instance(self):
+ db_instance = self._create_fake_instance()
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+ ) as (rebuild_mock, select_dest_mock):
+ self.conductor_manager.rebuild_instance(context=self.context,
+ instance=inst_obj,
+ **rebuild_args)
+ self.assertFalse(select_dest_mock.called)
+ rebuild_mock.assert_called_once_with(self.context,
+ instance=inst_obj,
+ **rebuild_args)
+
+ def test_rebuild_instance_with_scheduler(self):
+ db_instance = self._create_fake_instance()
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ inst_obj.host = 'noselect'
+ rebuild_args = self._prepare_rebuild_args({'host': None})
+ expected_host = 'thebesthost'
+ request_spec = {}
+ filter_properties = {'ignore_hosts': [(inst_obj.host)]}
+
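+ # With no target host, the conductor must ask the scheduler for one and
+ # exclude the instance's current host from the candidates.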
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations',
+ return_value=[{'host': expected_host}]),
+ mock.patch('nova.scheduler.utils.build_request_spec',
+ return_value=request_spec)
+ ) as (rebuild_mock, select_dest_mock, bs_mock):
+ self.conductor_manager.rebuild_instance(context=self.context,
+ instance=inst_obj,
+ **rebuild_args)
+ select_dest_mock.assert_called_once_with(self.context,
+ request_spec,
+ filter_properties)
+ rebuild_args['host'] = expected_host
+ rebuild_mock.assert_called_once_with(self.context,
+ instance=inst_obj,
+ **rebuild_args)
+
+ def test_rebuild_instance_with_scheduler_no_host(self):
+ db_instance = self._create_fake_instance()
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ inst_obj.host = 'noselect'
+ rebuild_args = self._prepare_rebuild_args({'host': None})
+ request_spec = {}
+ filter_properties = {'ignore_hosts': [(inst_obj.host)]}
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason='')),
+ mock.patch('nova.scheduler.utils.build_request_spec',
+ return_value=request_spec)
+ ) as (rebuild_mock, select_dest_mock, bs_mock):
+ self.assertRaises(exc.NoValidHost,
+ self.conductor_manager.rebuild_instance,
+ context=self.context, instance=inst_obj,
+ **rebuild_args)
+ select_dest_mock.assert_called_once_with(self.context,
+ request_spec,
+ filter_properties)
+ self.assertFalse(rebuild_mock.called)
+
+
+class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
+ """ComputeTaskManager Tests."""
+ def setUp(self):
+ super(ConductorTaskTestCase, self).setUp()
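+ # Both attributes point at the same in-process ComputeTaskManager, so the
+ # shared _BaseTaskTestCase tests exercise the manager directly (no RPC).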
+ self.conductor = conductor_manager.ComputeTaskManager()
+ self.conductor_manager = self.conductor
+
+ def test_migrate_server_fails_with_rebuild(self):
+ self.assertRaises(NotImplementedError, self.conductor.migrate_server,
+ self.context, None, None, True, True, None, None, None)
+
+ def test_migrate_server_fails_with_flavor(self):
+ self.assertRaises(NotImplementedError, self.conductor.migrate_server,
+ self.context, None, None, True, False, "dummy", None, None)
+
+ def _build_request_spec(self, instance):
+ return {
+ 'instance_properties': {
+ 'uuid': instance['uuid'], },
+ }
+
+ def _test_migrate_server_deals_with_expected_exceptions(self, ex):
+ instance = fake_instance.fake_db_instance(uuid='uuid',
+ vm_state=vm_states.ACTIVE)
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance, [])
+ self.mox.StubOutWithMock(live_migrate, 'execute')
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'set_vm_state_and_notify')
+
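+ # The known failure raised by live_migrate.execute should be recorded via
+ # set_vm_state_and_notify and then re-raised to the caller.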
+ live_migrate.execute(self.context, mox.IsA(objects.Instance),
+ 'destination', 'block_migration',
+ 'disk_over_commit').AndRaise(ex)
+
+ scheduler_utils.set_vm_state_and_notify(self.context,
+ 'compute_task', 'migrate_server',
+ {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'expected_task_state': task_states.MIGRATING},
+ ex, self._build_request_spec(inst_obj),
+ self.conductor_manager.db)
+ self.mox.ReplayAll()
+
+ self.conductor = utils.ExceptionHelper(self.conductor)
+
+ self.assertRaises(type(ex),
+ self.conductor.migrate_server, self.context, inst_obj,
+ {'host': 'destination'}, True, False, None, 'block_migration',
+ 'disk_over_commit')
+
+ def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
+ ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
+ self._test_migrate_server_deals_with_expected_exceptions(ex)
+
+ @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
+ @mock.patch.object(live_migrate, 'execute')
+ def test_migrate_server_deals_with_instancenotrunning_exception(self,
+ mock_live_migrate, mock_set_state):
+ inst = fake_instance.fake_db_instance()
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst, [])
+
+ error = exc.InstanceNotRunning(instance_id="fake")
+ mock_live_migrate.side_effect = error
+
+ self.conductor = utils.ExceptionHelper(self.conductor)
+
+ self.assertRaises(exc.InstanceNotRunning,
+ self.conductor.migrate_server, self.context, inst_obj,
+ {'host': 'destination'}, True, False, None,
+ 'block_migration', 'disk_over_commit')
+
+ request_spec = self._build_request_spec(inst_obj)
+ mock_set_state.assert_called_once_with(self.context, 'compute_task',
+ 'migrate_server',
+ dict(vm_state=inst_obj.vm_state,
+ task_state=None,
+ expected_task_state=task_states.MIGRATING),
+ error, request_spec, self.conductor_manager.db)
+
+ def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
+ ex = exc.DestinationHypervisorTooOld()
+ self._test_migrate_server_deals_with_expected_exceptions(ex)
+
+ def test_migrate_server_deals_with_HypervisorUnavailable(self):
+ ex = exc.HypervisorUnavailable(host='dummy')
+ self._test_migrate_server_deals_with_expected_exceptions(ex)
+
+ def test_migrate_server_deals_with_unexpected_exceptions(self):
+ instance = fake_instance.fake_db_instance()
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance, [])
+ self.mox.StubOutWithMock(live_migrate, 'execute')
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'set_vm_state_and_notify')
+
+ ex = IOError()
+ live_migrate.execute(self.context, mox.IsA(objects.Instance),
+ 'destination', 'block_migration',
+ 'disk_over_commit').AndRaise(ex)
+ self.mox.ReplayAll()
+
+ self.conductor = utils.ExceptionHelper(self.conductor)
+
+ self.assertRaises(exc.MigrationError,
+ self.conductor.migrate_server, self.context, inst_obj,
+ {'host': 'destination'}, True, False, None, 'block_migration',
+ 'disk_over_commit')
+
+ def test_set_vm_state_and_notify(self):
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'set_vm_state_and_notify')
+ scheduler_utils.set_vm_state_and_notify(
+ self.context, 'compute_task', 'method', 'updates',
+ 'ex', 'request_spec', self.conductor.db)
+
+ self.mox.ReplayAll()
+
+ self.conductor._set_vm_state_and_notify(
+ self.context, 'method', 'updates', 'ex', 'request_spec')
+
+ def test_cold_migrate_no_valid_host_back_in_active_state(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.conductor.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.conductor,
+ '_set_vm_state_and_notify')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
+
+ scheduler_utils.build_request_spec(
+ self.context, image, [inst_obj],
+ instance_type=flavor).AndReturn(request_spec)
+
+ exc_info = exc.NoValidHost(reason="")
+
+ self.conductor.scheduler_client.select_destinations(
+ self.context, request_spec,
+ filter_props).AndRaise(exc_info)
+
+ updates = {'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
+
+ self.conductor._set_vm_state_and_notify(self.context,
+ 'migrate_server',
+ updates, exc_info,
+ request_spec)
+ # NOTE(mriedem): Validate that the quota rollback is using
+ # the correct project_id and user_id.
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ inst_obj)
+ quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
+ user_id=user_id)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate,
+ self.context, inst_obj,
+ flavor, filter_props, [resvs])
+
+ def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.conductor.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.conductor,
+ '_set_vm_state_and_notify')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
+
+ scheduler_utils.build_request_spec(
+ self.context, image, [inst_obj],
+ instance_type=flavor).AndReturn(request_spec)
+
+ exc_info = exc.NoValidHost(reason="")
+
+ self.conductor.scheduler_client.select_destinations(
+ self.context, request_spec,
+ filter_props).AndRaise(exc_info)
+
+ updates = {'vm_state': vm_states.STOPPED,
+ 'task_state': None}
+
+ self.conductor._set_vm_state_and_notify(self.context,
+ 'migrate_server',
+ updates, exc_info,
+ request_spec)
+ # NOTE(mriedem): Validate that the quota rollback is using
+ # the correct project_id and user_id.
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ inst_obj)
+ quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
+ user_id=user_id)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor, filter_props, [resvs])
+
+ def test_cold_migrate_no_valid_host_error_msg(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ with contextlib.nested(
+ mock.patch.object(compute_utils, 'get_image_metadata',
+ return_value=image),
+ mock.patch.object(scheduler_utils, 'build_request_spec',
+ return_value=request_spec),
+ mock.patch.object(self.conductor.scheduler_client,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason=""))
+ ) as (image_mock, brs_mock, select_dest_mock):
+ nvh = self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor, filter_props, [resvs])
+ self.assertIn('cold migrate', nvh.message)
+
+ def test_cold_migrate_exception_host_in_error_state_and_raise(self):
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED)
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+ hosts = [dict(host='host1', nodename=None, limits={})]
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.conductor.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'populate_filter_properties')
+ self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
+ 'prep_resize')
+ self.mox.StubOutWithMock(self.conductor,
+ '_set_vm_state_and_notify')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
+
+ scheduler_utils.build_request_spec(
+ self.context, image, [inst_obj],
+ instance_type='flavor').AndReturn(request_spec)
+
+ expected_filter_props = {'retry': {'num_attempts': 1,
+ 'hosts': []},
+ 'context': None}
+ self.conductor.scheduler_client.select_destinations(
+ self.context, request_spec,
+ expected_filter_props).AndReturn(hosts)
+
+ scheduler_utils.populate_filter_properties(filter_props,
+ hosts[0])
+ exc_info = test.TestingException('something happened')
+
+ expected_filter_props = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+
+ self.conductor.compute_rpcapi.prep_resize(
+ self.context, image, inst_obj,
+ 'flavor', hosts[0]['host'], [resvs],
+ request_spec=request_spec,
+ filter_properties=expected_filter_props,
+ node=hosts[0]['nodename']).AndRaise(exc_info)
+
+ updates = {'vm_state': vm_states.STOPPED,
+ 'task_state': None}
+
+ self.conductor._set_vm_state_and_notify(self.context,
+ 'migrate_server',
+ updates, exc_info,
+ request_spec)
+ # NOTE(mriedem): Validate that the quota rollback is using
+ # the correct project_id and user_id.
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ inst_obj)
+ quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
+ user_id=user_id)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.conductor._cold_migrate,
+ self.context, inst_obj, 'flavor',
+ filter_props, [resvs])
+
+ def test_resize_no_valid_host_error_msg(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ flavor_new = flavors.get_flavor_by_name('m1.small')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ with contextlib.nested(
+ mock.patch.object(compute_utils, 'get_image_metadata',
+ return_value=image),
+ mock.patch.object(scheduler_utils, 'build_request_spec',
+ return_value=request_spec),
+ mock.patch.object(self.conductor.scheduler_client,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason=""))
+ ) as (image_mock, brs_mock, select_dest_mock):
+ nvh = self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor_new, filter_props,
+ [resvs])
+ self.assertIn('resize', nvh.message)
+
+ def test_build_instances_instance_not_found(self):
+ instances = [fake_instance.fake_instance_obj(self.context)
+ for i in xrange(2)]
+ self.mox.StubOutWithMock(instances[0], 'refresh')
+ self.mox.StubOutWithMock(instances[1], 'refresh')
+ image = {'fake-data': 'should_pass_silently'}
+ spec = {'fake': 'specs',
+ 'instance_properties': instances[0]}
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
+ self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'build_and_run_instance')
+
+ scheduler_utils.build_request_spec(self.context, image,
+ mox.IgnoreArg()).AndReturn(spec)
+ scheduler_utils.setup_instance_group(self.context, None, None)
+ self.conductor_manager.scheduler_client.select_destinations(
+ self.context, spec,
+ {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
+ [{'host': 'host1', 'nodename': 'node1', 'limits': []},
+ {'host': 'host2', 'nodename': 'node2', 'limits': []}])
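+ # The first instance disappears during refresh and is skipped; the second
+ # is still scheduled and built on host2.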
+ instances[0].refresh().AndRaise(
+ exc.InstanceNotFound(instance_id=instances[0].uuid))
+ instances[1].refresh()
+ self.conductor_manager.compute_rpcapi.build_and_run_instance(
+ self.context, instance=instances[1], host='host2',
+ image={'fake-data': 'should_pass_silently'}, request_spec=spec,
+ filter_properties={'limits': [],
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host2',
+ 'node2']]}},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
+ node='node2', limits=[])
+ self.mox.ReplayAll()
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image=image,
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ @mock.patch.object(scheduler_utils, 'setup_instance_group')
+ @mock.patch.object(scheduler_utils, 'build_request_spec')
+ def test_build_instances_info_cache_not_found(self, build_request_spec,
+ setup_instance_group):
+ instances = [fake_instance.fake_instance_obj(self.context)
+ for i in xrange(2)]
+ image = {'fake-data': 'should_pass_silently'}
+ destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
+ {'host': 'host2', 'nodename': 'node2', 'limits': []}]
+ spec = {'fake': 'specs',
+ 'instance_properties': instances[0]}
+ build_request_spec.return_value = spec
+ with contextlib.nested(
+ mock.patch.object(instances[0], 'refresh',
+ side_effect=exc.InstanceInfoCacheNotFound(
+ instance_uuid=instances[0].uuid)),
+ mock.patch.object(instances[1], 'refresh'),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations', return_value=destinations),
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'build_and_run_instance')
+ ) as (inst1_refresh, inst2_refresh, select_destinations,
+ build_and_run_instance):
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image=image,
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ setup_instance_group.assert_called_once_with(
+ self.context, None, None)
+ build_and_run_instance.assert_called_once_with(self.context,
+ instance=instances[1], host='host2', image={'fake-data':
+ 'should_pass_silently'}, request_spec=spec,
+ filter_properties={'limits': [],
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host2',
+ 'node2']]}},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mock.ANY,
+ node='node2', limits=[])
+
+
+class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
+ test_compute.BaseTestCase):
+ """Conductor compute_task RPC namespace Tests."""
+ def setUp(self):
+ super(ConductorTaskRPCAPITestCase, self).setUp()
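+ # Run the shared tests through the compute_task RPC API against a real
+ # conductor service started in-process.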
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_rpcapi.ComputeTaskAPI()
+ service_manager = self.conductor_service.manager
+ self.conductor_manager = service_manager.compute_task_mgr
+
+
+class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
+ """Compute task API Tests."""
+ def setUp(self):
+ super(ConductorTaskAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.ComputeTaskAPI()
+ service_manager = self.conductor_service.manager
+ self.conductor_manager = service_manager.compute_task_mgr
+
+
+class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
+ """Conductor LocalComputeTaskAPI Tests."""
+ def setUp(self):
+ super(ConductorLocalComputeTaskAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalComputeTaskAPI()
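+ # Unwrap the local API's internal wrapper to reach the ComputeTaskManager
+ # instance that the shared tests expect in self.conductor_manager.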
+ self.conductor_manager = self.conductor._manager._target
diff --git a/nova/tests/unit/conf_fixture.py b/nova/tests/unit/conf_fixture.py
new file mode 100644
index 0000000000..336ba61daf
--- /dev/null
+++ b/nova/tests/unit/conf_fixture.py
@@ -0,0 +1,64 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+
+from nova import config
+from nova import ipv6
+from nova.openstack.common.fixture import config as config_fixture
+from nova import paths
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
+CONF.import_opt('fake_network', 'nova.network.linux_net')
+CONF.import_opt('network_size', 'nova.network.manager')
+CONF.import_opt('num_networks', 'nova.network.manager')
+CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('policy_file', 'nova.openstack.common.policy')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('api_paste_config', 'nova.wsgi')
+
+
+class ConfFixture(config_fixture.Config):
+ """Fixture to manage global conf settings."""
+ def setUp(self):
+ super(ConfFixture, self).setUp()
+ self.conf.set_default('api_paste_config',
+ paths.state_path_def('etc/nova/api-paste.ini'))
+ self.conf.set_default('host', 'fake-mini')
+ self.conf.set_default('compute_driver',
+ 'nova.virt.fake.SmallFakeDriver')
+ self.conf.set_default('fake_network', True)
+ self.conf.set_default('flat_network_bridge', 'br100')
+ self.conf.set_default('floating_ip_dns_manager',
+ 'nova.tests.unit.utils.dns_manager')
+ self.conf.set_default('instance_dns_manager',
+ 'nova.tests.unit.utils.dns_manager')
+ self.conf.set_default('network_size', 8)
+ self.conf.set_default('num_networks', 2)
+ self.conf.set_default('use_ipv6', True)
+ self.conf.set_default('vlan_interface', 'eth0')
+ self.conf.set_default('auth_strategy', 'noauth')
+ config.parse_args([], default_config_files=[])
+ self.conf.set_default('connection', "sqlite://", group='database')
+ self.conf.set_default('sqlite_synchronous', False, group='database')
+ self.addCleanup(utils.cleanup_dns_managers)
+ self.addCleanup(ipv6.api.reset_backend)
diff --git a/nova/tests/unit/console/__init__.py b/nova/tests/unit/console/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/console/__init__.py
diff --git a/nova/tests/unit/console/test_console.py b/nova/tests/unit/console/test_console.py
new file mode 100644
index 0000000000..ba09272978
--- /dev/null
+++ b/nova/tests/unit/console/test_console.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2010 OpenStack Foundation
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests For Console proxy."""
+
+from oslo.config import cfg
+from oslo.utils import importutils
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova.console import api as console_api
+from nova.console import rpcapi as console_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+
+CONF = cfg.CONF
+CONF.import_opt('console_manager', 'nova.service')
+CONF.import_opt('console_driver', 'nova.console.manager')
+
+
+class ConsoleTestCase(test.TestCase):
+ """Test case for console proxy manager."""
+ def setUp(self):
+ super(ConsoleTestCase, self).setUp()
+ self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
+ stub_compute=True)
+ self.console = importutils.import_object(CONF.console_manager)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.host = 'test_compute_host'
+
+ def _create_instance(self):
+ """Create a test instance."""
+ inst = {}
+ inst['image_id'] = 1
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = 1
+ inst['ami_launch_index'] = 0
+ return db.instance_create(self.context, inst)
+
+ def test_get_pool_for_instance_host(self):
+ pool = self.console._get_pool_for_instance_host(self.context,
+ self.host)
+ self.assertEqual(pool['compute_host'], self.host)
+
+ def test_get_pool_creates_new_pool_if_needed(self):
+ self.assertRaises(exception.NotFound,
+ db.console_pool_get_by_host_type,
+ self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ pool = self.console._get_pool_for_instance_host(self.context,
+ self.host)
+ pool2 = db.console_pool_get_by_host_type(self.context,
+ self.host,
+ self.console.host,
+ self.console.driver.console_type)
+ self.assertEqual(pool['id'], pool2['id'])
+
+ def test_get_pool_does_not_create_new_pool_if_exists(self):
+ pool_info = {'address': '127.0.0.1',
+ 'username': 'test',
+ 'password': '1234pass',
+ 'host': self.console.host,
+ 'console_type': self.console.driver.console_type,
+ 'compute_host': 'sometesthostname'}
+ new_pool = db.console_pool_create(self.context, pool_info)
+ pool = self.console._get_pool_for_instance_host(self.context,
+ 'sometesthostname')
+ self.assertEqual(pool['id'], new_pool['id'])
+
+ def test_add_console(self):
+ instance = self._create_instance()
+ self.console.add_console(self.context, instance['id'])
+ instance = db.instance_get(self.context, instance['id'])
+ pool = db.console_pool_get_by_host_type(self.context,
+ instance['host'], self.console.host,
+ self.console.driver.console_type)
+
+ console_instances = [con['instance_uuid'] for con in pool['consoles']]
+ self.assertIn(instance['uuid'], console_instances)
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_add_console_does_not_duplicate(self):
+ instance = self._create_instance()
+ cons1 = self.console.add_console(self.context, instance['id'])
+ cons2 = self.console.add_console(self.context, instance['id'])
+ self.assertEqual(cons1, cons2)
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_remove_console(self):
+ instance = self._create_instance()
+ console_id = self.console.add_console(self.context, instance['id'])
+ self.console.remove_console(self.context, console_id)
+
+ self.assertRaises(exception.NotFound,
+ db.console_get,
+ self.context,
+ console_id)
+ db.instance_destroy(self.context, instance['uuid'])
+
+
+class ConsoleAPITestCase(test.TestCase):
+ """Test case for console API."""
+ def setUp(self):
+ super(ConsoleAPITestCase, self).setUp()
+
+ self.context = context.RequestContext('fake', 'fake')
+ self.console_api = console_api.API()
+ self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
+ self.fake_instance = {
+ 'id': 1,
+ 'uuid': self.fake_uuid,
+ 'host': 'fake_host'
+ }
+ self.fake_console = {
+ 'pool': {'host': 'fake_host'},
+ 'id': 'fake_id'
+ }
+
+ def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
+ return self.fake_console
+ self.stubs.Set(db, 'console_get', _fake_db_console_get)
+
+ def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid,
+ columns_to_join):
+ return [self.fake_console]
+ self.stubs.Set(db, 'console_get_all_by_instance',
+ _fake_db_console_get_all_by_instance)
+
+ def _fake_instance_get_by_uuid(_ctxt, _instance_uuid):
+ return self.fake_instance
+ self.stubs.Set(db, 'instance_get_by_uuid', _fake_instance_get_by_uuid)
+
+ def test_get_consoles(self):
+ console = self.console_api.get_consoles(self.context, self.fake_uuid)
+ self.assertEqual(console, [self.fake_console])
+
+ def test_get_console(self):
+ console = self.console_api.get_console(self.context, self.fake_uuid,
+ 'fake_id')
+ self.assertEqual(console, self.fake_console)
+
+ def test_delete_console(self):
+ self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'remove_console')
+
+ console_rpcapi.ConsoleAPI.remove_console(self.context, 'fake_id')
+
+ self.mox.ReplayAll()
+
+ self.console_api.delete_console(self.context, self.fake_uuid,
+ 'fake_id')
+
+ def test_create_console(self):
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI,
+ 'get_console_topic')
+
+ compute_rpcapi.ComputeAPI.get_console_topic(
+ self.context, 'fake_host').AndReturn('compute.fake_host')
+ self.mox.StubOutClassWithMocks(console_rpcapi, 'ConsoleAPI')
+ console_api_mock = console_rpcapi.ConsoleAPI(
+ topic='compute', server='fake_host')
+ console_api_mock.add_console(self.context,
+ self.fake_instance['id'])
+
+ self.mox.ReplayAll()
+
+ self.console_api.create_console(self.context, self.fake_uuid)
diff --git a/nova/tests/unit/console/test_rpcapi.py b/nova/tests/unit/console/test_rpcapi.py
new file mode 100644
index 0000000000..690c4bb103
--- /dev/null
+++ b/nova/tests/unit/console/test_rpcapi.py
@@ -0,0 +1,76 @@
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.console.rpcapi
+"""
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+
+from nova.console import rpcapi as console_rpcapi
+from nova import context
+from nova import test
+
+CONF = cfg.CONF
+
+
+class ConsoleRpcAPITestCase(test.NoDBTestCase):
+ def _test_console_api(self, method, rpc_method, **kwargs):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = console_rpcapi.ConsoleAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.console_topic)
+
+ orig_prepare = rpcapi.client.prepare
+ expected_version = kwargs.pop('version', rpcapi.client.target.version)
+
+ with contextlib.nested(
+ mock.patch.object(rpcapi.client, rpc_method),
+ mock.patch.object(rpcapi.client, 'prepare'),
+ mock.patch.object(rpcapi.client, 'can_send_version'),
+ ) as (
+ rpc_mock, prepare_mock, csv_mock
+ ):
+ prepare_mock.return_value = rpcapi.client
+ rpc_mock.return_value = 'foo' if rpc_method == 'call' else None
+ csv_mock.side_effect = (
+ lambda v: orig_prepare(version=v).can_send_version())
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, rpc_mock.return_value)
+
+ prepare_mock.assert_called_once_with(version=expected_version)
+ rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
+
+ def test_add_console(self):
+ self._test_console_api('add_console', instance_id='i',
+ rpc_method='cast')
+
+ # NOTE(russellb) Havana compat
+ self.flags(console='havana', group='upgrade_levels')
+ self._test_console_api('add_console', instance_id='i',
+ rpc_method='cast', version='1.0')
+
+ def test_remove_console(self):
+ self._test_console_api('remove_console', console_id='i',
+ rpc_method='cast')
+
+ # NOTE(russellb) Havana compat
+ self.flags(console='havana', group='upgrade_levels')
+ self._test_console_api('remove_console', console_id='i',
+ rpc_method='cast', version='1.0')
diff --git a/nova/tests/unit/console/test_serial.py b/nova/tests/unit/console/test_serial.py
new file mode 100644
index 0000000000..ebdc52dafa
--- /dev/null
+++ b/nova/tests/unit/console/test_serial.py
@@ -0,0 +1,137 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for Serial Console."""
+
+import socket
+
+import mock
+import six.moves
+
+from nova.console import serial
+from nova import exception
+from nova import test
+
+
+class SerialTestCase(test.TestCase):
+ def setUp(self):
+ super(SerialTestCase, self).setUp()
+ serial.ALLOCATED_PORTS = set()
+
+ def test_get_port_range(self):
+ start, stop = serial._get_port_range()
+ self.assertEqual(10000, start)
+ self.assertEqual(20000, stop)
+
+ def test_get_port_range_customized(self):
+ self.flags(port_range='30000:40000', group='serial_console')
+ start, stop = serial._get_port_range()
+ self.assertEqual(30000, start)
+ self.assertEqual(40000, stop)
+
+ def test_get_port_range_bad_range(self):
+ self.flags(port_range='40000:30000', group='serial_console')
+ start, stop = serial._get_port_range()
+ self.assertEqual(10000, start)
+ self.assertEqual(20000, stop)
+
+ def test_get_port_range_not_numeric(self):
+ self.flags(port_range='xxx:yyy', group='serial_console')
+ start, stop = serial._get_port_range()
+ self.assertEqual(10000, start)
+ self.assertEqual(20000, stop)
+
+ def test_get_port_range_invalid_syntax(self):
+ self.flags(port_range='10:20:30', group='serial_console')
+ start, stop = serial._get_port_range()
+ self.assertEqual(10000, start)
+ self.assertEqual(20000, stop)
+
+ @mock.patch('socket.socket')
+ def test_verify_port(self, fake_socket):
+ s = mock.MagicMock()
+ fake_socket.return_value = s
+
+ serial._verify_port('127.0.0.1', 10)
+
+ s.bind.assert_called_once_with(('127.0.0.1', 10))
+
+ @mock.patch('socket.socket')
+ def test_verify_port_in_use(self, fake_socket):
+ s = mock.MagicMock()
+ s.bind.side_effect = socket.error()
+ fake_socket.return_value = s
+
+ self.assertRaises(
+ exception.SocketPortInUseException,
+ serial._verify_port, '127.0.0.1', 10)
+
+ s.bind.assert_called_once_with(('127.0.0.1', 10))
+
+ @mock.patch('nova.console.serial._verify_port', lambda x, y: None)
+ def test_acquire_port(self):
+ start, stop = 15, 20
+ self.flags(
+ port_range='%d:%d' % (start, stop),
+ group='serial_console')
+
+ for port in six.moves.range(start, stop):
+ self.assertEqual(port, serial.acquire_port('127.0.0.1'))
+
+ for port in six.moves.range(start, stop):
+ self.assertEqual(port, serial.acquire_port('127.0.0.2'))
+
+ self.assertEqual(10, len(serial.ALLOCATED_PORTS))
+
+ @mock.patch('nova.console.serial._verify_port')
+ def test_acquire_port_in_use(self, fake_verify_port):
+ def port_10000_already_used(host, port):
+ if port == 10000 and host == '127.0.0.1':
+ raise exception.SocketPortInUseException(
+ port=port,
+ host=host,
+ error="already in use")
+ fake_verify_port.side_effect = port_10000_already_used
+
+ self.assertEqual(10001, serial.acquire_port('127.0.0.1'))
+ self.assertEqual(10000, serial.acquire_port('127.0.0.2'))
+
+ self.assertNotIn(('127.0.0.1', 10000), serial.ALLOCATED_PORTS)
+ self.assertIn(('127.0.0.1', 10001), serial.ALLOCATED_PORTS)
+ self.assertIn(('127.0.0.2', 10000), serial.ALLOCATED_PORTS)
+
+ @mock.patch('nova.console.serial._verify_port')
+ def test_acquire_port_not_able_to_bind_at_any_port(self, fake_verify_port):
+ start, stop = 15, 20
+ self.flags(
+ port_range='%d:%d' % (start, stop),
+ group='serial_console')
+
+ fake_verify_port.side_effect = (
+ exception.SocketPortRangeExhaustedException(host='127.0.0.1'))
+
+ self.assertRaises(
+ exception.SocketPortRangeExhaustedException,
+ serial.acquire_port, '127.0.0.1')
+
+ def test_release_port(self):
+ serial.ALLOCATED_PORTS.add(('127.0.0.1', 100))
+ serial.ALLOCATED_PORTS.add(('127.0.0.2', 100))
+ self.assertEqual(2, len(serial.ALLOCATED_PORTS))
+
+ serial.release_port('127.0.0.1', 100)
+ self.assertEqual(1, len(serial.ALLOCATED_PORTS))
+
+ serial.release_port('127.0.0.2', 100)
+ self.assertEqual(0, len(serial.ALLOCATED_PORTS))
diff --git a/nova/tests/unit/console/test_type.py b/nova/tests/unit/console/test_type.py
new file mode 100644
index 0000000000..d9a82d7658
--- /dev/null
+++ b/nova/tests/unit/console/test_type.py
@@ -0,0 +1,61 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.console import type as ctype
+from nova import test
+
+
+class TypeTestCase(test.TestCase):
+ def test_console(self):
+ c = ctype.Console(host='127.0.0.1', port=8945)
+
+ self.assertTrue(hasattr(c, 'host'))
+ self.assertTrue(hasattr(c, 'port'))
+ self.assertTrue(hasattr(c, 'internal_access_path'))
+
+ self.assertEqual('127.0.0.1', c.host)
+ self.assertEqual(8945, c.port)
+ self.assertIsNone(c.internal_access_path)
+
+ self.assertEqual({
+ 'host': '127.0.0.1',
+ 'port': 8945,
+ 'internal_access_path': None,
+ 'token': 'a-token',
+ 'access_url': 'an-url'},
+ c.get_connection_info('a-token', 'an-url'))
+
+ def test_console_vnc(self):
+ c = ctype.ConsoleVNC(host='127.0.0.1', port=8945)
+
+ self.assertIsInstance(c, ctype.Console)
+
+ def test_console_rdp(self):
+ c = ctype.ConsoleRDP(host='127.0.0.1', port=8945)
+
+ self.assertIsInstance(c, ctype.Console)
+
+ def test_console_spice(self):
+ c = ctype.ConsoleSpice(host='127.0.0.1', port=8945, tlsPort=6547)
+
+ self.assertIsInstance(c, ctype.Console)
+ self.assertEqual(6547, c.tlsPort)
+ self.assertEqual(
+ 6547, c.get_connection_info('a-token', 'an-url')['tlsPort'])
+
+ def test_console_serial(self):
+ c = ctype.ConsoleSerial(host='127.0.0.1', port=8945)
+
+ self.assertIsInstance(c, ctype.Console)
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
new file mode 100644
index 0000000000..c0526a2cf1
--- /dev/null
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -0,0 +1,157 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for nova websocketproxy."""
+
+
+import mock
+
+from nova.console import websocketproxy
+from nova import exception
+from nova import test
+
+
+class NovaProxyRequestHandlerBaseTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NovaProxyRequestHandlerBaseTestCase, self).setUp()
+
+ self.wh = websocketproxy.NovaProxyRequestHandlerBase()
+ self.wh.socket = mock.MagicMock()
+ self.wh.msg = mock.MagicMock()
+ self.wh.do_proxy = mock.MagicMock()
+ self.wh.headers = mock.MagicMock()
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client(self, check_token):
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000'
+ }
+ self.wh.socket.return_value = '<socket>'
+ self.wh.path = "http://127.0.0.1/?token=123-456-789"
+
+ self.wh.new_websocket_client()
+
+ check_token.assert_called_with(mock.ANY, token="123-456-789")
+ self.wh.socket.assert_called_with('node1', 10000, connect=True)
+ self.wh.do_proxy.assert_called_with('<socket>')
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_token_invalid(self, check_token):
+ check_token.return_value = False
+
+ self.wh.path = "http://127.0.0.1/?token=XXX"
+
+ self.assertRaises(exception.InvalidToken,
+ self.wh.new_websocket_client)
+ check_token.assert_called_with(mock.ANY, token="XXX")
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_novnc(self, check_token):
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000'
+ }
+ self.wh.socket.return_value = '<socket>'
+ self.wh.path = "http://127.0.0.1/"
+ self.wh.headers.getheader.return_value = "token=123-456-789"
+
+ self.wh.new_websocket_client()
+
+ check_token.assert_called_with(mock.ANY, token="123-456-789")
+ self.wh.socket.assert_called_with('node1', 10000, connect=True)
+ self.wh.do_proxy.assert_called_with('<socket>')
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_novnc_token_invalid(self, check_token):
+ check_token.return_value = False
+
+ self.wh.path = "http://127.0.0.1/"
+ self.wh.headers.getheader.return_value = "token=XXX"
+
+ self.assertRaises(exception.InvalidToken,
+ self.wh.new_websocket_client)
+ check_token.assert_called_with(mock.ANY, token="XXX")
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_internal_access_path(self, check_token):
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000',
+ 'internal_access_path': 'vmid'
+ }
+
+ tsock = mock.MagicMock()
+ tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
+
+ self.wh.socket.return_value = tsock
+ self.wh.path = "http://127.0.0.1/?token=123-456-789"
+
+ self.wh.new_websocket_client()
+
+ check_token.assert_called_with(mock.ANY, token="123-456-789")
+ self.wh.socket.assert_called_with('node1', 10000, connect=True)
+ self.wh.do_proxy.assert_called_with(tsock)
+
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_internal_access_path_err(self, check_token):
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000',
+ 'internal_access_path': 'xxx'
+ }
+
+ tsock = mock.MagicMock()
+ tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
+
+ self.wh.socket.return_value = tsock
+ self.wh.path = "http://127.0.0.1/?token=123-456-789"
+
+ self.assertRaises(exception.InvalidConnectionInfo,
+ self.wh.new_websocket_client)
+ check_token.assert_called_with(mock.ANY, token="123-456-789")
+
+ @mock.patch('sys.version_info')
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_py273_good_scheme(
+ self, check_token, version_info):
+ version_info.return_value = (2, 7, 3)
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000'
+ }
+ self.wh.socket.return_value = '<socket>'
+ self.wh.path = "http://127.0.0.1/?token=123-456-789"
+
+ self.wh.new_websocket_client()
+
+ check_token.assert_called_with(mock.ANY, token="123-456-789")
+ self.wh.socket.assert_called_with('node1', 10000, connect=True)
+ self.wh.do_proxy.assert_called_with('<socket>')
+
+ @mock.patch('sys.version_info')
+ @mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
+ def test_new_websocket_client_py273_special_scheme(
+ self, check_token, version_info):
+ version_info.return_value = (2, 7, 3)
+ check_token.return_value = {
+ 'host': 'node1',
+ 'port': '10000'
+ }
+ self.wh.socket.return_value = '<socket>'
+ self.wh.path = "ws://127.0.0.1/?token=123-456-789"
+
+ self.assertRaises(exception.NovaException,
+ self.wh.new_websocket_client)
diff --git a/nova/tests/unit/consoleauth/__init__.py b/nova/tests/unit/consoleauth/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/consoleauth/__init__.py
diff --git a/nova/tests/unit/consoleauth/test_consoleauth.py b/nova/tests/unit/consoleauth/test_consoleauth.py
new file mode 100644
index 0000000000..571d54fd92
--- /dev/null
+++ b/nova/tests/unit/consoleauth/test_consoleauth.py
@@ -0,0 +1,181 @@
+# Copyright 2012 OpenStack Foundation
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for Consoleauth Code.
+
+"""
+
+import mox
+from oslo.utils import timeutils
+
+from nova.consoleauth import manager
+from nova import context
+from nova import db
+from nova import test
+
+
+class ConsoleauthTestCase(test.TestCase):
+ """Test Case for consoleauth."""
+
+ def setUp(self):
+ super(ConsoleauthTestCase, self).setUp()
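+        # manager_api and manager refer to the same object; the tests call
+        # through manager_api while stubbing attributes on manager.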
+ self.manager_api = self.manager = manager.ConsoleAuthManager()
+ self.context = context.get_admin_context()
+ self.instance = db.instance_create(self.context, {})
+
+ def test_tokens_expire(self):
+ # Test that tokens expire correctly.
+ self.useFixture(test.TimeOverride())
+ token = u'mytok'
+ self.flags(console_token_ttl=1)
+
+ self._stub_validate_console_port(True)
+
+ self.manager_api.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.instance['uuid'])
+ self.assertTrue(self.manager_api.check_token(self.context, token))
+ timeutils.advance_time_seconds(1)
+ self.assertFalse(self.manager_api.check_token(self.context, token))
+
+ def _stub_validate_console_port(self, result):
+ def fake_validate_console_port(ctxt, instance, port, console_type):
+ return result
+
+ self.stubs.Set(self.manager.compute_rpcapi,
+ 'validate_console_port',
+ fake_validate_console_port)
+
+ def test_multiple_tokens_for_instance(self):
+ tokens = [u"token" + str(i) for i in xrange(10)]
+
+ self._stub_validate_console_port(True)
+
+ for token in tokens:
+ self.manager_api.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.instance['uuid'])
+
+ for token in tokens:
+ self.assertTrue(self.manager_api.check_token(self.context, token))
+
+ def test_delete_tokens_for_instance(self):
+ tokens = [u"token" + str(i) for i in xrange(10)]
+ for token in tokens:
+ self.manager_api.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.instance['uuid'])
+ self.manager_api.delete_tokens_for_instance(self.context,
+ self.instance['uuid'])
+ stored_tokens = self.manager._get_tokens_for_instance(
+ self.instance['uuid'])
+
+ self.assertEqual(len(stored_tokens), 0)
+
+ for token in tokens:
+ self.assertFalse(self.manager_api.check_token(self.context, token))
+
+ def test_wrong_token_has_port(self):
+ token = u'mytok'
+
+ self._stub_validate_console_port(False)
+
+ self.manager_api.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ instance_uuid=self.instance['uuid'])
+ self.assertFalse(self.manager_api.check_token(self.context, token))
+
+ def test_delete_expired_tokens(self):
+ self.useFixture(test.TimeOverride())
+ token = u'mytok'
+ self.flags(console_token_ttl=1)
+
+ self._stub_validate_console_port(True)
+
+ self.manager_api.authorize_console(self.context, token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.instance['uuid'])
+ timeutils.advance_time_seconds(1)
+ self.assertFalse(self.manager_api.check_token(self.context, token))
+
+ token1 = u'mytok2'
+ self.manager_api.authorize_console(self.context, token1, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.instance['uuid'])
+ stored_tokens = self.manager._get_tokens_for_instance(
+ self.instance['uuid'])
+        # When trying to store token1, the expired token is removed first.
+ self.assertEqual(len(stored_tokens), 1)
+ self.assertEqual(stored_tokens[0], token1)
+
+
+class ControlauthMemcacheEncodingTestCase(test.TestCase):
+ def setUp(self):
+ super(ControlauthMemcacheEncodingTestCase, self).setUp()
+ self.manager = manager.ConsoleAuthManager()
+ self.context = context.get_admin_context()
+ self.u_token = u"token"
+ self.u_instance = u"instance"
+
+ def test_authorize_console_encoding(self):
+ self.mox.StubOutWithMock(self.manager.mc, "set")
+ self.mox.StubOutWithMock(self.manager.mc, "get")
+ self.manager.mc.set(mox.IsA(str), mox.IgnoreArg(), mox.IgnoreArg()
+ ).AndReturn(True)
+ self.manager.mc.get(mox.IsA(str)).AndReturn(None)
+ self.manager.mc.set(mox.IsA(str), mox.IgnoreArg()).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ self.manager.authorize_console(self.context, self.u_token, 'novnc',
+ '127.0.0.1', '8080', 'host',
+ self.u_instance)
+
+ def test_check_token_encoding(self):
+ self.mox.StubOutWithMock(self.manager.mc, "get")
+ self.manager.mc.get(mox.IsA(str)).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ self.manager.check_token(self.context, self.u_token)
+
+ def test_delete_tokens_for_instance_encoding(self):
+ self.mox.StubOutWithMock(self.manager.mc, "delete")
+ self.mox.StubOutWithMock(self.manager.mc, "get")
+ self.manager.mc.get(mox.IsA(str)).AndReturn('["token"]')
+ self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
+ self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ self.manager.delete_tokens_for_instance(self.context, self.u_instance)
+
+
+class CellsConsoleauthTestCase(ConsoleauthTestCase):
+ """Test Case for consoleauth w/ cells enabled."""
+
+ def setUp(self):
+ super(CellsConsoleauthTestCase, self).setUp()
+ self.flags(enable=True, group='cells')
+
+ def _stub_validate_console_port(self, result):
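+        # With cells enabled, console port validation is routed through the
+        # cells RPC API rather than compute, so stub that API instead.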
+ def fake_validate_console_port(ctxt, instance_uuid, console_port,
+ console_type):
+ return result
+
+ self.stubs.Set(self.manager.cells_rpcapi,
+ 'validate_console_port',
+ fake_validate_console_port)
diff --git a/nova/tests/unit/consoleauth/test_rpcapi.py b/nova/tests/unit/consoleauth/test_rpcapi.py
new file mode 100644
index 0000000000..eb76acbf34
--- /dev/null
+++ b/nova/tests/unit/consoleauth/test_rpcapi.py
@@ -0,0 +1,91 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.consoleauth.rpcapi
+"""
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+
+from nova.consoleauth import rpcapi as consoleauth_rpcapi
+from nova import context
+from nova import test
+
+CONF = cfg.CONF
+
+
+class ConsoleAuthRpcAPITestCase(test.NoDBTestCase):
+ def _test_consoleauth_api(self, method, **kwargs):
+ do_cast = kwargs.pop('_do_cast', False)
+
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.consoleauth_topic)
+
+ orig_prepare = rpcapi.client.prepare
+ expected_version = kwargs.pop('version', rpcapi.client.target.version)
+
+ with contextlib.nested(
+ mock.patch.object(rpcapi.client, 'cast' if do_cast else 'call'),
+ mock.patch.object(rpcapi.client, 'prepare'),
+ mock.patch.object(rpcapi.client, 'can_send_version'),
+ ) as (
+ rpc_mock, prepare_mock, csv_mock
+ ):
+ prepare_mock.return_value = rpcapi.client
+ rpc_mock.return_value = None if do_cast else 'foo'
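+            # Defer can_send_version() to a real prepared client so that
+            # version pinning via the [upgrade_levels] consoleauth option
+            # still takes effect in these tests.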
+ csv_mock.side_effect = (
+ lambda v: orig_prepare(version=v).can_send_version())
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, rpc_mock.return_value)
+
+ prepare_mock.assert_called_once_with(version=expected_version)
+ rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
+
+ def test_authorize_console(self):
+ self._test_consoleauth_api('authorize_console', token='token',
+ console_type='ctype', host='h', port='p',
+ internal_access_path='iap', instance_uuid="instance")
+
+ # NOTE(russellb) Havana compat
+ self.flags(consoleauth='havana', group='upgrade_levels')
+ self._test_consoleauth_api('authorize_console', token='token',
+ console_type='ctype', host='h', port='p',
+ internal_access_path='iap', instance_uuid="instance",
+ version='1.2')
+
+ def test_check_token(self):
+ self._test_consoleauth_api('check_token', token='t')
+
+ # NOTE(russellb) Havana compat
+ self.flags(consoleauth='havana', group='upgrade_levels')
+ self._test_consoleauth_api('check_token', token='t', version='1.0')
+
+ def test_delete_tokens_for_instance(self):
+ self._test_consoleauth_api('delete_tokens_for_instance',
+ _do_cast=True,
+ instance_uuid="instance")
+
+ # NOTE(russellb) Havana compat
+ self.flags(consoleauth='havana', group='upgrade_levels')
+ self._test_consoleauth_api('delete_tokens_for_instance',
+ _do_cast=True,
+ instance_uuid="instance",
+ version='1.2')
diff --git a/nova/tests/unit/db/__init__.py b/nova/tests/unit/db/__init__.py
new file mode 100644
index 0000000000..fdf33be941
--- /dev/null
+++ b/nova/tests/unit/db/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`db` -- Stubs for DB API
+=============================
+"""
diff --git a/nova/tests/unit/db/fakes.py b/nova/tests/unit/db/fakes.py
new file mode 100644
index 0000000000..250c664d1e
--- /dev/null
+++ b/nova/tests/unit/db/fakes.py
@@ -0,0 +1,473 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite."""
+
+import copy
+import datetime
+
+from nova import db
+from nova import exception
+
+
+class FakeModel(object):
+ """Stubs out for model."""
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return '<FakeModel: %s>' % self.values
+
+ def get(self, name):
+ return self.values[name]
+
+
+def stub_out(stubs, funcs):
+ """Set the stubs in mapping in the db api."""
+ for func in funcs:
+ func_name = '_'.join(func.__name__.split('_')[1:])
+ stubs.Set(db, func_name, func)
+ stubs.Set(db.api, func_name, func)
+
+
+fixed_ip_fields = {'id': 0,
+ 'network_id': 0,
+ 'address': '192.168.0.100',
+ 'instance': False,
+ 'instance_uuid': 'eb57d790-fc60-4119-a51a-f2b0913bdc93',
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'virtual_interface': None,
+ 'floating_ips': []}
+
+network_fields = {'id': 0,
+ 'cidr': '192.168.0.0/24',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'dead:beef::/64',
+ 'netmask_v6': '64',
+ 'project_id': 'fake',
+ 'label': 'fake',
+ 'gateway': '192.168.0.1',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'broadcast': '192.168.0.255',
+ 'gateway_v6': 'dead:beef::1',
+ 'dns': '192.168.0.1',
+ 'vlan': None,
+ 'host': None,
+ 'injected': False,
+ 'vpn_public_address': '192.168.0.2'}
+
+flavor_fields = {'id': 0,
+ 'rxtx_cap': 3}
+
+floating_ip_fields = {'id': 0,
+ 'address': '192.168.1.100',
+ 'fixed_ip_id': None,
+ 'fixed_ip': None,
+ 'project_id': None,
+ 'pool': 'nova',
+ 'auto_assigned': False}
+
+virtual_interface_fields = {'id': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network_id': 0,
+ 'instance_id': 0,
+ 'network': FakeModel(network_fields)}
+
+fixed_ips = [fixed_ip_fields]
+floating_ips = [floating_ip_fields]
+virtual_interfacees = [virtual_interface_fields]
+networks = [network_fields]
+
+
+def fake_floating_ip_allocate_address(context, project_id, pool,
+ auto_assigned=False):
+ ips = filter(lambda i: i['fixed_ip_id'] is None and
+ i['project_id'] is None and
+ i['pool'] == pool,
+ floating_ips)
+ if not ips:
+ raise exception.NoMoreFloatingIps()
+ ips[0]['project_id'] = project_id
+ ips[0]['auto_assigned'] = auto_assigned
+ return FakeModel(ips[0])
+
+
+def fake_floating_ip_deallocate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ ips[0]['project_id'] = None
+ ips[0]['auto_assigned'] = False
+
+
+def fake_floating_ip_disassociate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ fixed_ip_address = None
+ if ips[0]['fixed_ip']:
+ fixed_ip_address = ips[0]['fixed_ip']['address']
+ ips[0]['fixed_ip'] = None
+ ips[0]['host'] = None
+ return fixed_ip_address
+
+
+def fake_floating_ip_fixed_ip_associate(context, floating_address,
+ fixed_address, host):
+ float = filter(lambda i: i['address'] == floating_address,
+ floating_ips)
+ fixed = filter(lambda i: i['address'] == fixed_address,
+ fixed_ips)
+ if float and fixed:
+ float[0]['fixed_ip'] = fixed[0]
+ float[0]['fixed_ip_id'] = fixed[0]['id']
+ float[0]['host'] = host
+
+
+def fake_floating_ip_get_all_by_host(context, host):
+ # TODO(jkoelker): Once we get the patches that remove host from
+ # the floating_ip table, we'll need to stub
+ # this out
+ pass
+
+
+def fake_floating_ip_get_by_address(context, address):
+ if isinstance(address, FakeModel):
+ # NOTE(tr3buchet): yo dawg, i heard you like addresses
+ address = address['address']
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if not ips:
+ raise exception.FloatingIpNotFoundForAddress(address=address)
+ return FakeModel(ips[0])
+
+
+def fake_floating_ip_set_auto_assigned(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ ips[0]['auto_assigned'] = True
+
+
+def fake_fixed_ip_associate(context, address, instance_id):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if not ips:
+ raise exception.NoMoreFixedIps(net='fake_net')
+ ips[0]['instance'] = True
+ ips[0]['instance_id'] = instance_id
+
+
+def fake_fixed_ip_associate_pool(context, network_id, instance_id):
+ ips = filter(lambda i: (i['network_id'] == network_id or
+ i['network_id'] is None) and not i['instance'],
+ fixed_ips)
+ if not ips:
+ raise exception.NoMoreFixedIps(net=network_id)
+ ips[0]['instance'] = True
+ ips[0]['instance_id'] = instance_id
+ return ips[0]['address']
+
+
+def fake_fixed_ip_create(context, values):
+ ip = dict(fixed_ip_fields)
+ ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1
+ for key in values:
+ ip[key] = values[key]
+ return ip
+
+
+def fake_fixed_ip_disassociate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ ips[0]['instance_id'] = None
+ ips[0]['instance'] = None
+ ips[0]['virtual_interface'] = None
+ ips[0]['virtual_interface_id'] = None
+
+
+def fake_fixed_ip_disassociate_all_by_timeout(context, host, time):
+ return 0
+
+
+def fake_fixed_ip_get_all(context):
+ return [FakeModel(i) for i in fixed_ips]
+
+
+def fake_fixed_ip_get_by_instance(context, instance_uuid):
+ ips = filter(lambda i: i['instance_uuid'] == instance_uuid,
+ fixed_ips)
+ return [FakeModel(i) for i in ips]
+
+
+def fake_fixed_ip_get_by_address(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ return FakeModel(ips[0])
+
+
+def fake_fixed_ip_update(context, address, values):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ fif = copy.deepcopy(fixed_ip_fields)
+ if ips:
+ for key in values:
+ ips[0][key] = values[key]
+ if key == 'virtual_interface_id':
+ vif = filter(lambda x: x['id'] == values[key],
+ virtual_interfacees)
+ if not vif:
+ continue
+ fif['virtual_interface'] = FakeModel(vif[0])
+
+
+def fake_flavor_get(context, id):
+ if flavor_fields['id'] == id:
+ return FakeModel(flavor_fields)
+
+
+def fake_virtual_interface_create(context, values):
+ vif = dict(virtual_interface_fields)
+ vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1
+ for key in values:
+ vif[key] = values[key]
+ return FakeModel(vif)
+
+
+def fake_virtual_interface_delete_by_instance(context, instance_id):
+ vif = copy.copy(virtual_interfacees)
+ addresses = [m for m in vif
+ if m['instance_id'] == instance_id]
+ try:
+ for address in addresses:
+ vif.remove(address)
+ except ValueError:
+ pass
+
+
+def fake_virtual_interface_get_by_instance(context, instance_id):
+ return [FakeModel(m) for m in virtual_interfacees
+ if m['instance_id'] == instance_id]
+
+
+def fake_virtual_interface_get_by_instance_and_network(context,
+ instance_id,
+ network_id):
+ vif = filter(lambda m: m['instance_id'] == instance_id and
+ m['network_id'] == network_id,
+ virtual_interfacees)
+ if not vif:
+ return None
+ return FakeModel(vif[0])
+
+
+def fake_network_create_safe(context, values):
+ net = dict(network_fields)
+ net['id'] = max([n['id'] for n in networks] or [-1]) + 1
+ for key in values:
+ net[key] = values[key]
+ return FakeModel(net)
+
+
+def fake_network_get(context, network_id):
+ net = filter(lambda n: n['id'] == network_id, networks)
+ if not net:
+ return None
+ return FakeModel(net[0])
+
+
+def fake_network_get_all(context):
+ return [FakeModel(n) for n in networks]
+
+
+def fake_network_get_all_by_host(context, host):
+ nets = filter(lambda n: n['host'] == host, networks)
+ return [FakeModel(n) for n in nets]
+
+
+def fake_network_set_host(context, network_id, host_id):
+ nets = filter(lambda n: n['id'] == network_id, networks)
+ for net in nets:
+ net['host'] = host_id
+ return host_id
+
+
+def fake_network_update(context, network_id, values):
+ nets = filter(lambda n: n['id'] == network_id, networks)
+ for net in nets:
+ for key in values:
+ net[key] = values[key]
+
+
+def fake_project_get_networks(context, project_id):
+ return [FakeModel(n) for n in networks
+ if n['project_id'] == project_id]
+
+
+def stub_out_db_network_api(stubs):
+
+ funcs = [fake_floating_ip_allocate_address,
+ fake_floating_ip_deallocate,
+ fake_floating_ip_disassociate,
+ fake_floating_ip_fixed_ip_associate,
+ fake_floating_ip_get_all_by_host,
+ fake_floating_ip_get_by_address,
+ fake_floating_ip_set_auto_assigned,
+ fake_fixed_ip_associate,
+ fake_fixed_ip_associate_pool,
+ fake_fixed_ip_create,
+ fake_fixed_ip_disassociate,
+ fake_fixed_ip_disassociate_all_by_timeout,
+ fake_fixed_ip_get_all,
+ fake_fixed_ip_get_by_instance,
+ fake_fixed_ip_get_by_address,
+ fake_fixed_ip_update,
+ fake_flavor_get,
+ fake_virtual_interface_create,
+ fake_virtual_interface_delete_by_instance,
+ fake_virtual_interface_get_by_instance,
+ fake_virtual_interface_get_by_instance_and_network,
+ fake_network_create_safe,
+ fake_network_get,
+ fake_network_get_all,
+ fake_network_get_all_by_host,
+ fake_network_set_host,
+ fake_network_update,
+ fake_project_get_networks]
+
+ stub_out(stubs, funcs)
+
+
+def stub_out_db_instance_api(stubs, injected=True):
+ """Stubs out the db API for creating Instances."""
+
+ def _create_instance_type(**updates):
+ instance_type = {'id': 2,
+ 'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'vcpu_weight': None,
+ 'root_gb': 0,
+ 'ephemeral_gb': 10,
+ 'flavorid': 1,
+ 'rxtx_factor': 1.0,
+ 'swap': 0,
+ 'deleted_at': None,
+ 'created_at': datetime.datetime(2014, 8, 8, 0, 0, 0),
+ 'updated_at': None,
+ 'deleted': False,
+ 'disabled': False,
+ 'is_public': True,
+ 'extra_specs': {},
+ }
+ if updates:
+ instance_type.update(updates)
+ return instance_type
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': _create_instance_type(
+ id=2,
+ name='m1.tiny',
+ memory_mb=512,
+ vcpus=1,
+ vcpu_weight=None,
+ root_gb=0,
+ ephemeral_gb=10,
+ flavorid=1,
+ rxtx_factor=1.0,
+ swap=0),
+ 'm1.small': _create_instance_type(
+ id=5,
+ name='m1.small',
+ memory_mb=2048,
+ vcpus=1,
+ vcpu_weight=None,
+ root_gb=20,
+ ephemeral_gb=0,
+ flavorid=2,
+ rxtx_factor=1.0,
+ swap=1024),
+ 'm1.medium': _create_instance_type(
+ id=1,
+ name='m1.medium',
+ memory_mb=4096,
+ vcpus=2,
+ vcpu_weight=None,
+ root_gb=40,
+ ephemeral_gb=40,
+ flavorid=3,
+ rxtx_factor=1.0,
+ swap=0),
+ 'm1.large': _create_instance_type(
+ id=3,
+ name='m1.large',
+ memory_mb=8192,
+ vcpus=4,
+ vcpu_weight=10,
+ root_gb=80,
+ ephemeral_gb=80,
+ flavorid=4,
+ rxtx_factor=1.0,
+ swap=0),
+ 'm1.xlarge': _create_instance_type(
+ id=4,
+ name='m1.xlarge',
+ memory_mb=16384,
+ vcpus=8,
+ vcpu_weight=None,
+ root_gb=160,
+ ephemeral_gb=160,
+ flavorid=5,
+ rxtx_factor=1.0,
+ swap=0)}
+
+ fixed_ip_fields = {'address': '10.0.0.3',
+ 'address_v6': 'fe80::a00:3',
+ 'network_id': 'fake_flat'}
+
+ def fake_flavor_get_all(context, inactive=0, filters=None):
+ return INSTANCE_TYPES.values()
+
+ def fake_flavor_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
+ def fake_flavor_get(context, id):
+ for name, inst_type in INSTANCE_TYPES.iteritems():
+ if str(inst_type['id']) == str(id):
+ return inst_type
+ return None
+
+ def fake_fixed_ip_get_by_instance(context, instance_id):
+ return [FakeModel(fixed_ip_fields)]
+
+ funcs = [fake_flavor_get_all,
+ fake_flavor_get_by_name,
+ fake_flavor_get,
+ fake_fixed_ip_get_by_instance]
+ stub_out(stubs, funcs)
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
new file mode 100644
index 0000000000..f103dd49ce
--- /dev/null
+++ b/nova/tests/unit/db/test_db_api.py
@@ -0,0 +1,7517 @@
+# encoding=UTF8
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the DB API."""
+
+import copy
+import datetime
+import types
+import uuid as stdlib_uuid
+
+import iso8601
+import mock
+import netaddr
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+from oslo.db.sqlalchemy import test_base
+from oslo.db.sqlalchemy import utils as sqlalchemyutils
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+from sqlalchemy import Column
+from sqlalchemy.dialects import sqlite
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy.orm import query
+from sqlalchemy import sql
+from sqlalchemy import Table
+
+from nova import block_device
+from nova.compute import arch
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as sqlalchemy_api
+from nova.db.sqlalchemy import models
+from nova.db.sqlalchemy import types as col_types
+from nova.db.sqlalchemy import utils as db_utils
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit import matchers
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
+CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
+
+get_engine = sqlalchemy_api.get_engine
+get_session = sqlalchemy_api.get_session
+
+
+def _reservation_get(context, uuid):
+ result = sqlalchemy_api.model_query(context, models.Reservation,
+ read_deleted="no").filter_by(uuid=uuid).first()
+
+ if not result:
+ raise exception.ReservationNotFound(uuid=uuid)
+
+ return result
+
+
+def _quota_reserve(context, project_id, user_id):
+ """Create sample Quota, QuotaUsage and Reservation objects.
+
+ There is no method db.quota_usage_create(), so we have to use
+ db.quota_reserve() for creating QuotaUsage objects.
+
+ Returns reservations uuids.
+
+ """
+ def get_sync(resource, usage):
+ def sync(elevated, project_id, user_id, session):
+ return {resource: usage}
+ return sync
+ quotas = {}
+ user_quotas = {}
+ resources = {}
+ deltas = {}
+ for i in range(3):
+ resource = 'resource%d' % i
+ if i == 2:
+ # test for project level resources
+ resource = 'fixed_ips'
+ quotas[resource] = db.quota_create(context,
+ project_id, resource, i)
+ user_quotas[resource] = quotas[resource]
+ else:
+ quotas[resource] = db.quota_create(context,
+ project_id, resource, i)
+ user_quotas[resource] = db.quota_create(context, project_id,
+ resource, i,
+ user_id=user_id)
+ sync_name = '_sync_%s' % resource
+ resources[resource] = quota.ReservableResource(
+ resource, sync_name, 'quota_res_%d' % i)
+ deltas[resource] = i
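+        # Install the fake sync function on sqlalchemy_api and register it
+        # by name so the reservation code can look it up.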
+ setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
+ sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
+ sqlalchemy_api, sync_name)
+ return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
+ timeutils.utcnow(), CONF.until_refresh,
+ datetime.timedelta(days=1), project_id, user_id)
+
+
+class DbTestCase(test.TestCase):
+ def setUp(self):
+ super(DbTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def create_instance_with_args(self, **kwargs):
+ args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
+ 'node': 'node1', 'project_id': self.project_id,
+ 'vm_state': 'fake'}
+ if 'context' in kwargs:
+ ctxt = kwargs.pop('context')
+ args['project_id'] = ctxt.project_id
+ else:
+ ctxt = self.context
+ args.update(kwargs)
+ return db.instance_create(ctxt, args)
+
+ def fake_metadata(self, content):
+ meta = {}
+ for i in range(0, 10):
+ meta["foo%i" % i] = "this is %s item %i" % (content, i)
+ return meta
+
+ def create_metadata_for_instance(self, instance_uuid):
+ meta = self.fake_metadata('metadata')
+ db.instance_metadata_update(self.context, instance_uuid, meta, False)
+ sys_meta = self.fake_metadata('system_metadata')
+ db.instance_system_metadata_update(self.context, instance_uuid,
+ sys_meta, False)
+ return meta, sys_meta
+
+
+class DecoratorTestCase(test.TestCase):
+ def _test_decorator_wraps_helper(self, decorator):
+ def test_func():
+ """Test docstring."""
+
+ decorated_func = decorator(test_func)
+
+ self.assertEqual(test_func.func_name, decorated_func.func_name)
+ self.assertEqual(test_func.__doc__, decorated_func.__doc__)
+ self.assertEqual(test_func.__module__, decorated_func.__module__)
+
+ def test_require_context_decorator_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
+
+ def test_require_admin_context_decorator_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
+
+ def test_require_deadlock_retry_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api._retry_on_deadlock)
+
+
+def _get_fake_aggr_values():
+ return {'name': 'fake_aggregate'}
+
+
+def _get_fake_aggr_metadata():
+ return {'fake_key1': 'fake_value1',
+ 'fake_key2': 'fake_value2',
+ 'availability_zone': 'fake_avail_zone'}
+
+
+def _get_fake_aggr_hosts():
+ return ['foo.openstack.org']
+
+
+def _create_aggregate(context=context.get_admin_context(),
+ values=_get_fake_aggr_values(),
+ metadata=_get_fake_aggr_metadata()):
+ return db.aggregate_create(context, values, metadata)
+
+
+def _create_aggregate_with_hosts(context=context.get_admin_context(),
+ values=_get_fake_aggr_values(),
+ metadata=_get_fake_aggr_metadata(),
+ hosts=_get_fake_aggr_hosts()):
+ result = _create_aggregate(context=context,
+ values=values, metadata=metadata)
+ for host in hosts:
+ db.aggregate_host_add(context, result['id'], host)
+ return result
+
+
+class NotDbApiTestCase(DbTestCase):
+ def setUp(self):
+ super(NotDbApiTestCase, self).setUp()
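+        # 'notdb://' is not a real backend; it forces the generic code
+        # paths, e.g. LIKE matching instead of database regexp support.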
+ self.flags(connection='notdb://', group='database')
+
+ def test_instance_get_all_by_filters_regex_unsupported_db(self):
+ # Ensure that the 'LIKE' operator is used for unsupported dbs.
+ self.create_instance_with_args(display_name='test1')
+ self.create_instance_with_args(display_name='test2')
+ self.create_instance_with_args(display_name='diff')
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': 'test'})
+ self.assertEqual(2, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': 'di'})
+ self.assertEqual(1, len(result))
+
+ def test_instance_get_all_by_filters_paginate(self):
+ test1 = self.create_instance_with_args(display_name='test1')
+ test2 = self.create_instance_with_args(display_name='test2')
+ test3 = self.create_instance_with_args(display_name='test3')
+
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ marker=None)
+ self.assertEqual(3, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test1['uuid'])
+ self.assertEqual(2, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test2['uuid'])
+ self.assertEqual(1, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test3['uuid'])
+ self.assertEqual(0, len(result))
+
+ self.assertRaises(exception.MarkerNotFound,
+ db.instance_get_all_by_filters,
+ self.context, {'display_name': '%test%'},
+ marker=str(stdlib_uuid.uuid4()))
+
+ def test_convert_objects_related_datetimes(self):
+
+ t1 = timeutils.utcnow()
+ t2 = t1 + datetime.timedelta(seconds=10)
+ t3 = t2 + datetime.timedelta(hours=1)
+
+ t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
+ t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
+
+ datetime_keys = ('created_at', 'deleted_at')
+
+ test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
+ self.assertEqual(test1, expected_dict)
+
+ test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
+ expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
+ self.assertEqual(test2, expected_dict)
+
+ test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
+ expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
+ sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
+ self.assertEqual(test3, expected_dict)
+
+
+class AggregateDBApiTestCase(test.TestCase):
+ def setUp(self):
+ super(AggregateDBApiTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def test_aggregate_create_no_metadata(self):
+ result = _create_aggregate(metadata=None)
+ self.assertEqual(result['name'], 'fake_aggregate')
+
+ def test_aggregate_create_avoid_name_conflict(self):
+ r1 = _create_aggregate(metadata=None)
+ db.aggregate_delete(context.get_admin_context(), r1['id'])
+ values = {'name': r1['name']}
+ metadata = {'availability_zone': 'new_zone'}
+ r2 = _create_aggregate(values=values, metadata=metadata)
+ self.assertEqual(r2['name'], values['name'])
+ self.assertEqual(r2['availability_zone'],
+ metadata['availability_zone'])
+
+ def test_aggregate_create_raise_exist_exc(self):
+ _create_aggregate(metadata=None)
+ self.assertRaises(exception.AggregateNameExists,
+ _create_aggregate, metadata=None)
+
+ def test_aggregate_get_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_get,
+ ctxt, aggregate_id)
+
+ def test_aggregate_metadata_get_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_metadata_get,
+ ctxt, aggregate_id)
+
+ def test_aggregate_create_with_metadata(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
+
+ def test_aggregate_create_delete_create_with_metadata(self):
+ # test for bug 1052479
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
+ db.aggregate_delete(ctxt, result['id'])
+ result = _create_aggregate(metadata={'availability_zone':
+ 'fake_avail_zone'})
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertEqual(expected_metadata, {'availability_zone':
+ 'fake_avail_zone'})
+
+ def test_aggregate_get(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt)
+ expected = db.aggregate_get(ctxt, result['id'])
+ self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
+ self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
+
+ def test_aggregate_get_by_host(self):
+ ctxt = context.get_admin_context()
+ values2 = {'name': 'fake_aggregate2'}
+ values3 = {'name': 'fake_aggregate3'}
+ values4 = {'name': 'fake_aggregate4'}
+ values5 = {'name': 'fake_aggregate5'}
+ a1 = _create_aggregate_with_hosts(context=ctxt)
+ a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
+ # a3 has no hosts and should not be in the results.
+ _create_aggregate(context=ctxt, values=values3)
+ # a4 has no matching hosts.
+ _create_aggregate_with_hosts(context=ctxt, values=values4,
+ hosts=['foo4.openstack.org'])
+ # a5 has no matching hosts after deleting the only matching host.
+ a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
+ hosts=['foo5.openstack.org', 'foo.openstack.org'])
+ db.aggregate_host_delete(ctxt, a5['id'],
+ 'foo.openstack.org')
+ r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
+ self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
+
+ def test_aggregate_get_by_host_with_key(self):
+ ctxt = context.get_admin_context()
+ values2 = {'name': 'fake_aggregate2'}
+ values3 = {'name': 'fake_aggregate3'}
+ values4 = {'name': 'fake_aggregate4'}
+ a1 = _create_aggregate_with_hosts(context=ctxt,
+ metadata={'goodkey': 'good'})
+ _create_aggregate_with_hosts(context=ctxt, values=values2)
+ _create_aggregate(context=ctxt, values=values3)
+ _create_aggregate_with_hosts(context=ctxt, values=values4,
+ hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
+ # filter result by key
+ r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
+ self.assertEqual([a1['id']], [x['id'] for x in r1])
+
+ def test_aggregate_metadata_get_by_host(self):
+ ctxt = context.get_admin_context()
+ values = {'name': 'fake_aggregate2'}
+ values2 = {'name': 'fake_aggregate3'}
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values)
+ _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
+ r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
+ self.assertEqual(r1['fake_key1'], set(['fake_value1']))
+ self.assertNotIn('badkey', r1)
+
+ def test_aggregate_metadata_get_by_metadata_key(self):
+ ctxt = context.get_admin_context()
+ values = {'aggregate_id': 'fake_id',
+ 'name': 'fake_aggregate'}
+ aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
+ hosts=['bar.openstack.org'],
+ metadata={'availability_zone':
+ 'az1'})
+ r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
+ 'availability_zone')
+ self.assertEqual(r1['availability_zone'], set(['az1']))
+ self.assertIn('availability_zone', r1)
+ self.assertNotIn('name', r1)
+
+ def test_aggregate_metadata_get_by_host_with_key(self):
+ ctxt = context.get_admin_context()
+ values2 = {'name': 'fake_aggregate12'}
+ values3 = {'name': 'fake_aggregate23'}
+ a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
+ a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
+ a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
+ a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=a2_hosts, metadata=a2_metadata)
+ a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
+ hosts=a3_hosts, metadata=a3_metadata)
+ r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
+ key='good')
+ self.assertEqual(r1['good'], set(['value12', 'value23']))
+ self.assertNotIn('fake_key1', r1)
+ self.assertNotIn('bad', r1)
+ # Delete metadata
+ db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
+ r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
+ key='good')
+ self.assertNotIn('good', r2)
+
+ def test_aggregate_host_get_by_metadata_key(self):
+ ctxt = context.get_admin_context()
+ values2 = {'name': 'fake_aggregate12'}
+ values3 = {'name': 'fake_aggregate23'}
+ a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
+ a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
+ a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
+ a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
+ _create_aggregate_with_hosts(context=ctxt)
+ _create_aggregate_with_hosts(context=ctxt, values=values2,
+ hosts=a2_hosts, metadata=a2_metadata)
+ _create_aggregate_with_hosts(context=ctxt, values=values3,
+ hosts=a3_hosts, metadata=a3_metadata)
+ r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
+ self.assertEqual({
+ 'foo1.openstack.org': set(['value12']),
+ 'foo2.openstack.org': set(['value12', 'value23']),
+ 'foo3.openstack.org': set(['value23']),
+ }, r1)
+ self.assertNotIn('fake_key1', r1)
+
+ def test_aggregate_get_by_host_not_found(self):
+ ctxt = context.get_admin_context()
+ _create_aggregate_with_hosts(context=ctxt)
+ self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
+
+ def test_aggregate_delete_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_delete,
+ ctxt, aggregate_id)
+
+ def test_aggregate_delete(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+ db.aggregate_delete(ctxt, result['id'])
+ expected = db.aggregate_get_all(ctxt)
+ self.assertEqual(0, len(expected))
+ aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
+ result['id'])
+ self.assertEqual(aggregate['deleted'], result['id'])
+
+ def test_aggregate_update(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ self.assertEqual(result['availability_zone'], 'fake_avail_zone')
+ new_values = _get_fake_aggr_values()
+ new_values['availability_zone'] = 'different_avail_zone'
+ updated = db.aggregate_update(ctxt, result['id'], new_values)
+ self.assertNotEqual(result['availability_zone'],
+ updated['availability_zone'])
+
+ def test_aggregate_update_with_metadata(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+ values = _get_fake_aggr_values()
+ values['metadata'] = _get_fake_aggr_metadata()
+ values['availability_zone'] = 'different_avail_zone'
+ db.aggregate_update(ctxt, result['id'], values)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ updated = db.aggregate_get(ctxt, result['id'])
+ self.assertThat(values['metadata'],
+ matchers.DictMatches(expected))
+ self.assertNotEqual(result['availability_zone'],
+ updated['availability_zone'])
+
+ def test_aggregate_update_with_existing_metadata(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ values = _get_fake_aggr_values()
+ values['metadata'] = _get_fake_aggr_metadata()
+ values['metadata']['fake_key1'] = 'foo'
+ db.aggregate_update(ctxt, result['id'], values)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(values['metadata'], matchers.DictMatches(expected))
+
+ def test_aggregate_update_zone_with_existing_metadata(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ new_zone = {'availability_zone': 'fake_avail_zone_2'}
+ metadata = _get_fake_aggr_metadata()
+ metadata.update(new_zone)
+ db.aggregate_update(ctxt, result['id'], new_zone)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_update_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ new_values = _get_fake_aggr_values()
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_update, ctxt, aggregate_id, new_values)
+
+ def test_aggregate_update_raise_name_exist(self):
+ ctxt = context.get_admin_context()
+ _create_aggregate(context=ctxt, values={'name': 'test1'},
+ metadata={'availability_zone': 'fake_avail_zone'})
+ _create_aggregate(context=ctxt, values={'name': 'test2'},
+ metadata={'availability_zone': 'fake_avail_zone'})
+ aggregate_id = 1
+ new_values = {'name': 'test2'}
+ self.assertRaises(exception.AggregateNameExists,
+ db.aggregate_update, ctxt, aggregate_id, new_values)
+
+ def test_aggregate_get_all(self):
+ ctxt = context.get_admin_context()
+ counter = 3
+ for c in range(counter):
+ _create_aggregate(context=ctxt,
+ values={'name': 'fake_aggregate_%d' % c},
+ metadata=None)
+ results = db.aggregate_get_all(ctxt)
+ self.assertEqual(len(results), counter)
+
+ def test_aggregate_get_all_non_deleted(self):
+ ctxt = context.get_admin_context()
+ add_counter = 5
+ remove_counter = 2
+ aggregates = []
+ for c in range(1, add_counter):
+ values = {'name': 'fake_aggregate_%d' % c}
+ aggregates.append(_create_aggregate(context=ctxt,
+ values=values, metadata=None))
+ for c in range(1, remove_counter):
+ db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
+ results = db.aggregate_get_all(ctxt)
+ self.assertEqual(len(results), add_counter - remove_counter)
+
+ def test_aggregate_metadata_add(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+ metadata = _get_fake_aggr_metadata()
+ db.aggregate_metadata_add(ctxt, result['id'], metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_add_and_update(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ metadata = _get_fake_aggr_metadata()
+ key = metadata.keys()[0]
+ new_metadata = {key: 'foo',
+ 'fake_new_key': 'fake_new_value'}
+ metadata.update(new_metadata)
+ db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_add_retry(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+
+ def counted():
+ def get_query(context, id, session, read_deleted):
+ get_query.counter += 1
+ raise db_exc.DBDuplicateEntry
+ get_query.counter = 0
+ return get_query
+
+ get_query = counted()
+ self.stubs.Set(sqlalchemy_api,
+ '_aggregate_metadata_get_query', get_query)
+ self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
+ aggregate_metadata_add, ctxt, result['id'], {},
+ max_retries=5)
+ self.assertEqual(get_query.counter, 5)
+
+ def test_aggregate_metadata_update(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ metadata = _get_fake_aggr_metadata()
+ key = metadata.keys()[0]
+ db.aggregate_metadata_delete(ctxt, result['id'], key)
+ new_metadata = {key: 'foo'}
+ db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ metadata[key] = 'foo'
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_delete(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+ metadata = _get_fake_aggr_metadata()
+ db.aggregate_metadata_add(ctxt, result['id'], metadata)
+ db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ del metadata[metadata.keys()[0]]
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_remove_availability_zone(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata={'availability_zone':
+ 'fake_avail_zone'})
+ db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ aggregate = db.aggregate_get(ctxt, result['id'])
+ self.assertIsNone(aggregate['availability_zone'])
+ self.assertThat({}, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_delete_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ self.assertRaises(exception.AggregateMetadataNotFound,
+ db.aggregate_metadata_delete,
+ ctxt, result['id'], 'foo_key')
+
+ def test_aggregate_host_add(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
+ self.assertEqual(_get_fake_aggr_hosts(), expected)
+
+ def test_aggregate_host_re_add(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ host = _get_fake_aggr_hosts()[0]
+ db.aggregate_host_delete(ctxt, result['id'], host)
+ db.aggregate_host_add(ctxt, result['id'], host)
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
+ self.assertEqual(len(expected), 1)
+
+ def test_aggregate_host_add_duplicate_works(self):
+ ctxt = context.get_admin_context()
+ r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ r2 = _create_aggregate_with_hosts(ctxt,
+ values={'name': 'fake_aggregate2'},
+ metadata={'availability_zone': 'fake_avail_zone2'})
+ h1 = db.aggregate_host_get_all(ctxt, r1['id'])
+ h2 = db.aggregate_host_get_all(ctxt, r2['id'])
+ self.assertEqual(h1, h2)
+
+ def test_aggregate_host_add_duplicate_raise_exist_exc(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ self.assertRaises(exception.AggregateHostExists,
+ db.aggregate_host_add,
+ ctxt, result['id'], _get_fake_aggr_hosts()[0])
+
+ def test_aggregate_host_add_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ host = _get_fake_aggr_hosts()[0]
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_host_add,
+ ctxt, aggregate_id, host)
+
+ def test_aggregate_host_delete(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ db.aggregate_host_delete(ctxt, result['id'],
+ _get_fake_aggr_hosts()[0])
+ expected = db.aggregate_host_get_all(ctxt, result['id'])
+ self.assertEqual(0, len(expected))
+
+ def test_aggregate_host_delete_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ self.assertRaises(exception.AggregateHostNotFound,
+ db.aggregate_host_delete,
+ ctxt, result['id'], _get_fake_aggr_hosts()[0])
+
+
+class SqlAlchemyDbApiTestCase(DbTestCase):
+ def test_instance_get_all_by_host(self):
+ ctxt = context.get_admin_context()
+
+ self.create_instance_with_args()
+ self.create_instance_with_args()
+ self.create_instance_with_args(host='host2')
+ result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
+ self.assertEqual(2, len(result))
+
+ def test_instance_get_all_uuids_by_host(self):
+ ctxt = context.get_admin_context()
+ self.create_instance_with_args()
+ self.create_instance_with_args()
+ self.create_instance_with_args(host='host2')
+ result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
+ self.assertEqual(2, len(result))
+ self.assertEqual(types.UnicodeType, type(result[0]))
+
+ def test_instance_get_active_by_window_joined(self):
+ now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
+ start_time = now - datetime.timedelta(minutes=10)
+ now1 = now + datetime.timedelta(minutes=1)
+ now2 = now + datetime.timedelta(minutes=2)
+ now3 = now + datetime.timedelta(minutes=3)
+ ctxt = context.get_admin_context()
+ # used for testing columns_to_join
+ network_info = jsonutils.dumps({'ckey': 'cvalue'})
+ sample_data = {
+ 'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
+ 'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
+ 'info_cache': {'network_info': network_info},
+ }
+ self.create_instance_with_args(launched_at=now, **sample_data)
+ self.create_instance_with_args(launched_at=now1, terminated_at=now2,
+ **sample_data)
+ self.create_instance_with_args(launched_at=now2, terminated_at=now3,
+ **sample_data)
+ self.create_instance_with_args(launched_at=now3, terminated_at=None,
+ **sample_data)
+
+ result = sqlalchemy_api.instance_get_active_by_window_joined(
+ ctxt, begin=now)
+ self.assertEqual(4, len(result))
+ # verify that all default columns are joined
+ meta = utils.metadata_to_dict(result[0]['metadata'])
+ self.assertEqual(sample_data['metadata'], meta)
+ sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
+ self.assertEqual(sample_data['system_metadata'], sys_meta)
+ self.assertIn('info_cache', result[0])
+
+ result = sqlalchemy_api.instance_get_active_by_window_joined(
+ ctxt, begin=now3, columns_to_join=['info_cache'])
+ self.assertEqual(2, len(result))
+ # verify that only info_cache is loaded
+ meta = utils.metadata_to_dict(result[0]['metadata'])
+ self.assertEqual({}, meta)
+ self.assertIn('info_cache', result[0])
+
+ result = sqlalchemy_api.instance_get_active_by_window_joined(
+ ctxt, begin=start_time, end=now)
+ self.assertEqual(0, len(result))
+
+ result = sqlalchemy_api.instance_get_active_by_window_joined(
+ ctxt, begin=start_time, end=now2,
+ columns_to_join=['system_metadata'])
+ self.assertEqual(2, len(result))
+ # verify that only system_metadata is loaded
+ meta = utils.metadata_to_dict(result[0]['metadata'])
+ self.assertEqual({}, meta)
+ sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
+ self.assertEqual(sample_data['system_metadata'], sys_meta)
+ self.assertNotIn('info_cache', result[0])
+
+ result = sqlalchemy_api.instance_get_active_by_window_joined(
+ ctxt, begin=now2, end=now3,
+ columns_to_join=['metadata', 'info_cache'])
+ self.assertEqual(2, len(result))
+ # verify that only metadata and info_cache are loaded
+ meta = utils.metadata_to_dict(result[0]['metadata'])
+ self.assertEqual(sample_data['metadata'], meta)
+ sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
+ self.assertEqual({}, sys_meta)
+ self.assertIn('info_cache', result[0])
+ self.assertEqual(network_info, result[0]['info_cache']['network_info'])
+
+
+class ProcessSortParamTestCase(test.TestCase):
+
+ def test_process_sort_params_defaults(self):
+ '''Verifies default sort parameters.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
+ self.assertEqual(['created_at', 'id'], sort_keys)
+ self.assertEqual(['asc', 'asc'], sort_dirs)
+
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
+ self.assertEqual(['created_at', 'id'], sort_keys)
+ self.assertEqual(['asc', 'asc'], sort_dirs)
+
+ def test_process_sort_params_override_default_keys(self):
+ '''Verifies that the default keys can be overridden.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ [], [], default_keys=['key1', 'key2', 'key3'])
+ self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
+ self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
+
+ def test_process_sort_params_override_default_dir(self):
+ '''Verifies that the default direction can be overridden.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ [], [], default_dir='dir1')
+ self.assertEqual(['created_at', 'id'], sort_keys)
+ self.assertEqual(['dir1', 'dir1'], sort_dirs)
+
+ def test_process_sort_params_override_default_key_and_dir(self):
+ '''Verifies that the default key and dir can be overridden.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ [], [], default_keys=['key1', 'key2', 'key3'],
+ default_dir='dir1')
+ self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
+ self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
+
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ [], [], default_keys=[], default_dir='dir1')
+ self.assertEqual([], sort_keys)
+ self.assertEqual([], sort_dirs)
+
+ def test_process_sort_params_non_default(self):
+ '''Verifies that non-default keys are added correctly.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['key1', 'key2'], ['asc', 'desc'])
+ self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
+ # First sort_dir in list is used when adding the default keys
+ self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
+
+ def test_process_sort_params_default(self):
+ '''Verifies that default keys are added correctly.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2'], ['asc', 'desc'])
+ self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
+ self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
+
+ # Include default key value, rely on default direction
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2'], [])
+ self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
+ self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
+
+ def test_process_sort_params_default_dir(self):
+ '''Verifies that the default dir is applied to all keys.'''
+ # Direction is set, ignore default dir
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2'], ['desc'], default_dir='dir')
+ self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
+ self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
+
+ # But should be used if no direction is set
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2'], [], default_dir='dir')
+ self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
+ self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
+
+ def test_process_sort_params_unequal_length(self):
+        '''Verifies that a sort direction list shorter than the keys is handled.'''
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2', 'key3'], ['desc'])
+ self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
+ self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
+
+        # Missing directions default to the first direction in the list
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2', 'key3'], ['desc', 'asc'])
+ self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
+ self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
+
+ sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
+ ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
+ self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
+ self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
+
+ def test_process_sort_params_extra_dirs_lengths(self):
+        '''InvalidInput is raised if more directions than keys are given.'''
+ self.assertRaises(exception.InvalidInput,
+ sqlalchemy_api.process_sort_params,
+ ['key1', 'key2'],
+ ['asc', 'desc', 'desc'])
+
+
+class MigrationTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MigrationTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
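+        # Fixture: two in-progress migrations host1->host2, one each
+        # reverted/confirmed/error host1->host2, one in-progress
+        # host2->host1, plus host2->host3 and host3->host4 for the
+        # filter tests below.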
+ self._create()
+ self._create()
+ self._create(status='reverted')
+ self._create(status='confirmed')
+ self._create(status='error')
+ self._create(source_compute='host2', source_node='b',
+ dest_compute='host1', dest_node='a')
+ self._create(source_compute='host2', dest_compute='host3')
+ self._create(source_compute='host3', dest_compute='host4')
+
+ def _create(self, status='migrating', source_compute='host1',
+ source_node='a', dest_compute='host2', dest_node='b',
+ system_metadata=None):
+
+ values = {'host': source_compute}
+ instance = db.instance_create(self.ctxt, values)
+ if system_metadata:
+ db.instance_system_metadata_update(self.ctxt, instance['uuid'],
+ system_metadata, False)
+
+ values = {'status': status, 'source_compute': source_compute,
+ 'source_node': source_node, 'dest_compute': dest_compute,
+ 'dest_node': dest_node, 'instance_uuid': instance['uuid']}
+ db.migration_create(self.ctxt, values)
+
+ def _assert_in_progress(self, migrations):
+ for migration in migrations:
+ self.assertNotEqual('confirmed', migration['status'])
+ self.assertNotEqual('reverted', migration['status'])
+ self.assertNotEqual('error', migration['status'])
+
+ def test_migration_get_in_progress_joins(self):
+ self._create(source_compute='foo', system_metadata={'foo': 'bar'})
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'foo', 'a')
+ system_metadata = migrations[0]['instance']['system_metadata'][0]
+ self.assertEqual(system_metadata['key'], 'foo')
+ self.assertEqual(system_metadata['value'], 'bar')
+
+ def test_in_progress_host1_nodea(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host1', 'a')
+ # 2 as source + 1 as dest
+ self.assertEqual(3, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_in_progress_host1_nodeb(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host1', 'b')
+ # some migrations are to/from host1, but none with a node 'b'
+ self.assertEqual(0, len(migrations))
+
+ def test_in_progress_host2_nodeb(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host2', 'b')
+ # 2 as dest, 1 as source
+ self.assertEqual(3, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_instance_join(self):
+ migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
+ 'host2', 'b')
+ for migration in migrations:
+ instance = migration['instance']
+ self.assertEqual(migration['instance_uuid'], instance['uuid'])
+
+ def test_get_migrations_by_filters(self):
+ filters = {"status": "migrating", "host": "host3"}
+ migrations = db.migration_get_all_by_filters(self.ctxt, filters)
+ self.assertEqual(2, len(migrations))
+ for migration in migrations:
+ self.assertEqual(filters["status"], migration['status'])
+ hosts = [migration['source_compute'], migration['dest_compute']]
+ self.assertIn(filters["host"], hosts)
+
+ def test_only_admin_can_get_all_migrations_by_filters(self):
+ user_ctxt = context.RequestContext(user_id=None, project_id=None,
+ is_admin=False, read_deleted="no",
+ overwrite=False)
+
+ self.assertRaises(exception.AdminRequired,
+ db.migration_get_all_by_filters, user_ctxt, {})
+
+ def test_migration_get_unconfirmed_by_dest_compute(self):
+ # Ensure no migrations are returned.
+ results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
+ 'fake_host')
+ self.assertEqual(0, len(results))
+
+        # Ensure no migrations are returned for the second host either.
+ results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
+ 'fake_host2')
+ self.assertEqual(0, len(results))
+
+ updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
+ values = {"status": "finished", "updated_at": updated_at,
+ "dest_compute": "fake_host2"}
+ migration = db.migration_create(self.ctxt, values)
+
+        # Ensure a migration for a different host is not returned
+ results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
+ 'fake_host')
+ self.assertEqual(0, len(results))
+
+ # Ensure one migration older than 10 seconds is returned.
+ results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
+ 'fake_host2')
+ self.assertEqual(1, len(results))
+ db.migration_update(self.ctxt, migration['id'],
+ {"status": "CONFIRMED"})
+
+ # Ensure the new migration is not returned.
+ updated_at = timeutils.utcnow()
+ values = {"status": "finished", "updated_at": updated_at,
+ "dest_compute": "fake_host2"}
+ migration = db.migration_create(self.ctxt, values)
+ results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
+ "fake_host2")
+ self.assertEqual(0, len(results))
+ db.migration_update(self.ctxt, migration['id'],
+ {"status": "CONFIRMED"})
+
+ def test_migration_update_not_found(self):
+ self.assertRaises(exception.MigrationNotFound,
+ db.migration_update, self.ctxt, 42, {})
+
+
+class ModelsObjectComparatorMixin(object):
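+
+    """Helpers that compare DB API objects as dicts, ignoring given keys."""
+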
+ def _dict_from_object(self, obj, ignored_keys):
+ if ignored_keys is None:
+ ignored_keys = []
+ return dict([(k, v) for k, v in obj.iteritems()
+ if k not in ignored_keys])
+
+ def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
+ obj1 = self._dict_from_object(obj1, ignored_keys)
+ obj2 = self._dict_from_object(obj2, ignored_keys)
+
+ self.assertEqual(len(obj1),
+ len(obj2),
+ "Keys mismatch: %s" %
+ str(set(obj1.keys()) ^ set(obj2.keys())))
+ for key, value in obj1.iteritems():
+ self.assertEqual(value, obj2[key])
+
+ def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
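+        # Order-insensitive comparison: convert each object to a dict and
+        # sort both lists by their values before comparing them.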
+ obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
+ sort_key = lambda d: [d[k] for k in sorted(d)]
+ conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
+
+ self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
+
+ def _assertEqualOrderedListOfObjects(self, objs1, objs2,
+ ignored_keys=None):
+ obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
+ conv = lambda obj: map(obj_to_dict, obj)
+
+ self.assertEqual(conv(objs1), conv(objs2))
+
+ def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
+ self.assertEqual(len(primitives1), len(primitives2))
+ for primitive in primitives1:
+ self.assertIn(primitive, primitives2)
+
+ for primitive in primitives2:
+ self.assertIn(primitive, primitives1)
+
+
+class InstanceSystemMetadataTestCase(test.TestCase):
+
+ """Tests for db.api.instance_system_metadata_* methods."""
+
+ def setUp(self):
+ super(InstanceSystemMetadataTestCase, self).setUp()
+ values = {'host': 'h1', 'project_id': 'p1',
+ 'system_metadata': {'key': 'value'}}
+ self.ctxt = context.get_admin_context()
+ self.instance = db.instance_create(self.ctxt, values)
+
+ def test_instance_system_metadata_get(self):
+ metadata = db.instance_system_metadata_get(self.ctxt,
+ self.instance['uuid'])
+ self.assertEqual(metadata, {'key': 'value'})
+
+ def test_instance_system_metadata_update_new_pair(self):
+ db.instance_system_metadata_update(
+ self.ctxt, self.instance['uuid'],
+ {'new_key': 'new_value'}, False)
+ metadata = db.instance_system_metadata_get(self.ctxt,
+ self.instance['uuid'])
+ self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
+
+ def test_instance_system_metadata_update_existent_pair(self):
+ db.instance_system_metadata_update(
+ self.ctxt, self.instance['uuid'],
+ {'key': 'new_value'}, True)
+ metadata = db.instance_system_metadata_get(self.ctxt,
+ self.instance['uuid'])
+ self.assertEqual(metadata, {'key': 'new_value'})
+
+ def test_instance_system_metadata_update_delete_true(self):
+ db.instance_system_metadata_update(
+ self.ctxt, self.instance['uuid'],
+ {'new_key': 'new_value'}, True)
+ metadata = db.instance_system_metadata_get(self.ctxt,
+ self.instance['uuid'])
+ self.assertEqual(metadata, {'new_key': 'new_value'})
+
+ @test.testtools.skip("bug 1189462")
+ def test_instance_system_metadata_update_nonexistent(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_system_metadata_update,
+ self.ctxt, 'nonexistent-uuid',
+ {'key': 'value'}, True)
+
+
+class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.reservation_* methods."""
+
+ def setUp(self):
+ super(ReservationTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
+ usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1', 'user1')
+
+ self.values = {'uuid': 'sample-uuid',
+ 'project_id': 'project1',
+ 'user_id': 'user1',
+ 'resource': 'resource1',
+ 'delta': 42,
+ 'expire': timeutils.utcnow() + datetime.timedelta(days=1),
+ 'usage': {'id': usage.id}}
+
+ def test_reservation_commit(self):
+ expected = {'project_id': 'project1', 'user_id': 'user1',
+ 'resource0': {'reserved': 0, 'in_use': 0},
+ 'resource1': {'reserved': 1, 'in_use': 1},
+ 'fixed_ips': {'reserved': 2, 'in_use': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'))
+ _reservation_get(self.ctxt, self.reservations[0])
+ db.reservation_commit(self.ctxt, self.reservations, 'project1',
+ 'user1')
+ self.assertRaises(exception.ReservationNotFound,
+ _reservation_get, self.ctxt, self.reservations[0])
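+        # Committing applies the reserved deltas to the in_use counters and
+        # removes the reservations.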
+ expected = {'project_id': 'project1', 'user_id': 'user1',
+ 'resource0': {'reserved': 0, 'in_use': 0},
+ 'resource1': {'reserved': 0, 'in_use': 2},
+ 'fixed_ips': {'reserved': 0, 'in_use': 4}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'))
+
+ def test_reservation_rollback(self):
+ expected = {'project_id': 'project1', 'user_id': 'user1',
+ 'resource0': {'reserved': 0, 'in_use': 0},
+ 'resource1': {'reserved': 1, 'in_use': 1},
+ 'fixed_ips': {'reserved': 2, 'in_use': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'))
+ _reservation_get(self.ctxt, self.reservations[0])
+ db.reservation_rollback(self.ctxt, self.reservations, 'project1',
+ 'user1')
+ self.assertRaises(exception.ReservationNotFound,
+ _reservation_get, self.ctxt, self.reservations[0])
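+        # Rolling back discards the reserved deltas without touching the
+        # in_use counters and removes the reservations.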
+ expected = {'project_id': 'project1', 'user_id': 'user1',
+ 'resource0': {'reserved': 0, 'in_use': 0},
+ 'resource1': {'reserved': 0, 'in_use': 1},
+ 'fixed_ips': {'reserved': 0, 'in_use': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'))
+
+ def test_reservation_expire(self):
+ db.reservation_expire(self.ctxt)
+
+ expected = {'project_id': 'project1', 'user_id': 'user1',
+ 'resource0': {'reserved': 0, 'in_use': 0},
+ 'resource1': {'reserved': 0, 'in_use': 1},
+ 'fixed_ips': {'reserved': 0, 'in_use': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'))
+
+
+class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(SecurityGroupRuleTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'name': 'fake_sec_group',
+ 'description': 'fake_sec_group_descr',
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'instances': []
+ }
+
+ def _get_base_rule_values(self):
+ return {
+ 'protocol': "tcp",
+ 'from_port': 80,
+ 'to_port': 8080,
+ 'cidr': None,
+ 'deleted': 0,
+ 'deleted_at': None,
+ 'grantee_group': None,
+ 'updated_at': None
+ }
+
+ def _create_security_group(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.security_group_create(self.ctxt, v)
+
+ def _create_security_group_rule(self, values):
+ v = self._get_base_rule_values()
+ v.update(values)
+ return db.security_group_rule_create(self.ctxt, v)
+
+ def test_security_group_rule_create(self):
+ security_group_rule = self._create_security_group_rule({})
+ self.assertIsNotNone(security_group_rule['id'])
+ for key, value in self._get_base_rule_values().items():
+ self.assertEqual(value, security_group_rule[key])
+
+ def _test_security_group_rule_get_by_security_group(self, columns=None):
+ instance = db.instance_create(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ security_group = self._create_security_group({
+ 'instances': [instance]})
+ security_group_rule = self._create_security_group_rule(
+ {'parent_group': security_group, 'grantee_group': security_group})
+ security_group_rule1 = self._create_security_group_rule(
+ {'parent_group': security_group, 'grantee_group': security_group})
+ found_rules = db.security_group_rule_get_by_security_group(
+ self.ctxt, security_group['id'], columns_to_join=columns)
+ self.assertEqual(len(found_rules), 2)
+ rules_ids = [security_group_rule['id'], security_group_rule1['id']]
+ for rule in found_rules:
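+            # With the default joins the grantee group, its instances and
+            # their system metadata are loaded; with an explicit empty
+            # column list they are not.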
+ if columns is None:
+ self.assertIn('grantee_group', dict(rule.iteritems()))
+ self.assertIn('instances',
+ dict(rule.grantee_group.iteritems()))
+ self.assertIn(
+ 'system_metadata',
+ dict(rule.grantee_group.instances[0].iteritems()))
+ self.assertIn(rule['id'], rules_ids)
+ else:
+ self.assertNotIn('grantee_group', dict(rule.iteritems()))
+
+ def test_security_group_rule_get_by_security_group(self):
+ self._test_security_group_rule_get_by_security_group()
+
+ def test_security_group_rule_get_by_security_group_no_joins(self):
+ self._test_security_group_rule_get_by_security_group(columns=[])
+
+ def test_security_group_rule_get_by_security_group_grantee(self):
+ security_group = self._create_security_group({})
+ security_group_rule = self._create_security_group_rule(
+ {'grantee_group': security_group})
+ rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
+ security_group['id'])
+ self.assertEqual(len(rules), 1)
+ self.assertEqual(rules[0]['id'], security_group_rule['id'])
+
+ def test_security_group_rule_destroy(self):
+ self._create_security_group({'name': 'fake1'})
+ self._create_security_group({'name': 'fake2'})
+ security_group_rule1 = self._create_security_group_rule({})
+ security_group_rule2 = self._create_security_group_rule({})
+ db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_get,
+ self.ctxt, security_group_rule1['id'])
+ self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
+ security_group_rule2['id']),
+ security_group_rule2, ['grantee_group'])
+
+ def test_security_group_rule_destroy_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_destroy, self.ctxt, 100500)
+
+ def test_security_group_rule_get(self):
+ security_group_rule1 = (
+ self._create_security_group_rule({}))
+ self._create_security_group_rule({})
+ real_security_group_rule = db.security_group_rule_get(self.ctxt,
+ security_group_rule1['id'])
+ self._assertEqualObjects(security_group_rule1,
+ real_security_group_rule, ['grantee_group'])
+
+ def test_security_group_rule_get_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_get, self.ctxt, 100500)
+
+ def test_security_group_rule_count_by_group(self):
+ sg1 = self._create_security_group({'name': 'fake1'})
+ sg2 = self._create_security_group({'name': 'fake2'})
+ rules_by_group = {sg1: [], sg2: []}
+ for group in rules_by_group:
+ rules = rules_by_group[group]
+ for i in range(0, 10):
+ rules.append(
+ self._create_security_group_rule({'parent_group_id':
+ group['id']}))
+ db.security_group_rule_destroy(self.ctxt,
+ rules_by_group[sg1][0]['id'])
+ counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
+ group['id'])
+ for group in [sg1, sg2]]
+ expected = [9, 10]
+ self.assertEqual(counted_groups, expected)
+
+
+class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(SecurityGroupTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'name': 'fake_sec_group',
+ 'description': 'fake_sec_group_descr',
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'instances': []
+ }
+
+ def _create_security_group(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.security_group_create(self.ctxt, v)
+
+ def test_security_group_create(self):
+ security_group = self._create_security_group({})
+ self.assertIsNotNone(security_group['id'])
+ for key, value in self._get_base_values().iteritems():
+ self.assertEqual(value, security_group[key])
+
+ def test_security_group_destroy(self):
+ security_group1 = self._create_security_group({})
+ security_group2 = \
+ self._create_security_group({'name': 'fake_sec_group2'})
+
+ db.security_group_destroy(self.ctxt, security_group1['id'])
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_get,
+ self.ctxt, security_group1['id'])
+ self._assertEqualObjects(db.security_group_get(
+ self.ctxt, security_group2['id'],
+ columns_to_join=['instances']), security_group2)
+
+ def test_security_group_get(self):
+ security_group1 = self._create_security_group({})
+ self._create_security_group({'name': 'fake_sec_group2'})
+ real_security_group = db.security_group_get(self.ctxt,
+ security_group1['id'],
+ columns_to_join=['instances'])
+ self._assertEqualObjects(security_group1,
+ real_security_group)
+
+ def test_security_group_get_with_instance_columns(self):
+ instance = db.instance_create(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ secgroup = self._create_security_group({'instances': [instance]})
+ secgroup = db.security_group_get(
+ self.ctxt, secgroup['id'],
+ columns_to_join=['instances.system_metadata'])
+ inst = secgroup.instances[0]
+ self.assertIn('system_metadata', dict(inst.iteritems()).keys())
+
+ def test_security_group_get_no_instances(self):
+ instance = db.instance_create(self.ctxt, {})
+ sid = self._create_security_group({'instances': [instance]})['id']
+
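+        # The instances relationship is only loaded when explicitly joined.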
+ security_group = db.security_group_get(self.ctxt, sid,
+ columns_to_join=['instances'])
+ self.assertIn('instances', security_group.__dict__)
+
+ security_group = db.security_group_get(self.ctxt, sid)
+ self.assertNotIn('instances', security_group.__dict__)
+
+ def test_security_group_get_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_get, self.ctxt, 100500)
+
+ def test_security_group_get_by_name(self):
+ security_group1 = self._create_security_group({'name': 'fake1'})
+ security_group2 = self._create_security_group({'name': 'fake2'})
+
+ real_security_group1 = db.security_group_get_by_name(
+ self.ctxt,
+ security_group1['project_id'],
+ security_group1['name'],
+ columns_to_join=None)
+ real_security_group2 = db.security_group_get_by_name(
+ self.ctxt,
+ security_group2['project_id'],
+ security_group2['name'],
+ columns_to_join=None)
+ self._assertEqualObjects(security_group1, real_security_group1)
+ self._assertEqualObjects(security_group2, real_security_group2)
+
+ def test_security_group_get_by_project(self):
+ security_group1 = self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj1'})
+ security_group2 = self._create_security_group(
+ {'name': 'fake2', 'project_id': 'fake_proj2'})
+
+ real1 = db.security_group_get_by_project(
+ self.ctxt,
+ security_group1['project_id'])
+ real2 = db.security_group_get_by_project(
+ self.ctxt,
+ security_group2['project_id'])
+
+ expected1, expected2 = [security_group1], [security_group2]
+ self._assertEqualListsOfObjects(expected1, real1,
+ ignored_keys=['instances'])
+ self._assertEqualListsOfObjects(expected2, real2,
+ ignored_keys=['instances'])
+
+ def test_security_group_get_by_instance(self):
+ instance = db.instance_create(self.ctxt, dict(host='foo'))
+ values = [
+ {'name': 'fake1', 'instances': [instance]},
+ {'name': 'fake2', 'instances': [instance]},
+ {'name': 'fake3', 'instances': []},
+ ]
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = db.security_group_get_by_instance(self.ctxt,
+ instance['uuid'])
+ expected = security_groups[:2]
+ self._assertEqualListsOfObjects(expected, real,
+ ignored_keys=['instances'])
+
+ def test_security_group_get_all(self):
+ values = [
+ {'name': 'fake1', 'project_id': 'fake_proj1'},
+ {'name': 'fake2', 'project_id': 'fake_proj2'},
+ ]
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = db.security_group_get_all(self.ctxt)
+
+ self._assertEqualListsOfObjects(security_groups, real,
+ ignored_keys=['instances'])
+
+ def test_security_group_in_use(self):
+ instance = db.instance_create(self.ctxt, dict(host='foo'))
+ values = [
+ {'instances': [instance],
+ 'name': 'fake_in_use'},
+ {'instances': []},
+ ]
+
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = []
+ for security_group in security_groups:
+ in_use = db.security_group_in_use(self.ctxt,
+ security_group['id'])
+ real.append(in_use)
+ expected = [True, False]
+
+ self.assertEqual(expected, real)
+
+ def test_security_group_ensure_default(self):
+ self.ctxt.project_id = 'fake'
+ self.ctxt.user_id = 'fake'
+ self.assertEqual(0, len(db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)))
+
+ db.security_group_ensure_default(self.ctxt)
+
+ security_groups = db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)
+
+ self.assertEqual(1, len(security_groups))
+ self.assertEqual("default", security_groups[0]["name"])
+
+ usage = db.quota_usage_get(self.ctxt,
+ self.ctxt.project_id,
+ 'security_groups',
+ self.ctxt.user_id)
+ self.assertEqual(1, usage.in_use)
+
+ @mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
+ def test_security_group_ensure_default_called_concurrently(self, sg_mock):
+        # Make sure NotFound is always raised here to trick Nova into
+        # inserting the duplicate security group entry.
+ sg_mock.side_effect = exception.NotFound
+
+ # create the first db entry
+ self.ctxt.project_id = 1
+ db.security_group_ensure_default(self.ctxt)
+ security_groups = db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)
+ self.assertEqual(1, len(security_groups))
+
+ # create the second one and ensure the exception is handled properly
+ default_group = db.security_group_ensure_default(self.ctxt)
+ self.assertEqual('default', default_group.name)
+
+ def test_security_group_update(self):
+ security_group = self._create_security_group({})
+ new_values = {
+ 'name': 'sec_group1',
+ 'description': 'sec_group_descr1',
+ 'user_id': 'fake_user1',
+ 'project_id': 'fake_proj1',
+ }
+
+ updated_group = db.security_group_update(self.ctxt,
+ security_group['id'],
+ new_values,
+ columns_to_join=['rules.grantee_group'])
+ for key, value in new_values.iteritems():
+ self.assertEqual(updated_group[key], value)
+ self.assertEqual(updated_group['rules'], [])
+
+ def test_security_group_update_to_duplicate(self):
+ self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj1'})
+ security_group2 = self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj2'})
+
+ self.assertRaises(exception.SecurityGroupExists,
+ db.security_group_update,
+ self.ctxt, security_group2['id'],
+ {'project_id': 'fake_proj1'})
+
+
+class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.instance_* methods."""
+
+ sample_data = {
+ 'project_id': 'project1',
+ 'hostname': 'example.com',
+ 'host': 'h1',
+ 'node': 'n1',
+ 'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
+ 'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
+ 'info_cache': {'ckey': 'cvalue'},
+ }
+
+ def setUp(self):
+ super(InstanceTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _assertEqualInstances(self, instance1, instance2):
+ self._assertEqualObjects(instance1, instance2,
+ ignored_keys=['metadata', 'system_metadata', 'info_cache'])
+
+ def _assertEqualListsOfInstances(self, list1, list2):
+ self._assertEqualListsOfObjects(list1, list2,
+ ignored_keys=['metadata', 'system_metadata', 'info_cache'])
+
+ def create_instance_with_args(self, **kwargs):
+ if 'context' in kwargs:
+ context = kwargs.pop('context')
+ else:
+ context = self.ctxt
+ args = self.sample_data.copy()
+ args.update(kwargs)
+ return db.instance_create(context, args)
+
+ def test_instance_create(self):
+ instance = self.create_instance_with_args()
+ self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+
+ def test_instance_create_with_object_values(self):
+ values = {
+ 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1'),
+ }
+ dt_keys = ('created_at', 'deleted_at', 'updated_at',
+ 'launched_at', 'terminated_at', 'scheduled_at')
+ dt = timeutils.utcnow()
+ dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
+ for key in dt_keys:
+ values[key] = dt_utc
+ inst = db.instance_create(self.ctxt, values)
+ self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
+ self.assertEqual(inst['access_ip_v6'], '::1')
+ for key in dt_keys:
+ self.assertEqual(inst[key], dt)
+
+ def test_instance_update_with_object_values(self):
+ values = {
+ 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1'),
+ }
+ dt_keys = ('created_at', 'deleted_at', 'updated_at',
+ 'launched_at', 'terminated_at', 'scheduled_at')
+ dt = timeutils.utcnow()
+ dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
+ for key in dt_keys:
+ values[key] = dt_utc
+ inst = db.instance_create(self.ctxt, {})
+ inst = db.instance_update(self.ctxt, inst['uuid'], values)
+ self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
+ self.assertEqual(inst['access_ip_v6'], '::1')
+ for key in dt_keys:
+ self.assertEqual(inst[key], dt)
+
+ def test_instance_update_no_metadata_clobber(self):
+ meta = {'foo': 'bar'}
+ sys_meta = {'sfoo': 'sbar'}
+ values = {
+ 'metadata': meta,
+ 'system_metadata': sys_meta,
+ }
+ inst = db.instance_create(self.ctxt, {})
+ inst = db.instance_update(self.ctxt, inst['uuid'], values)
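+        # instance_update() must not mutate the dicts passed in by the caller.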
+ self.assertEqual({'foo': 'bar'}, meta)
+ self.assertEqual({'sfoo': 'sbar'}, sys_meta)
+
+ def test_instance_get_all_with_meta(self):
+ inst = self.create_instance_with_args()
+ for inst in db.instance_get_all(self.ctxt):
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_update(self):
+ instance = self.create_instance_with_args()
+ metadata = {'host': 'bar', 'key2': 'wuff'}
+ system_metadata = {'original_image_ref': 'baz'}
+ # Update the metadata
+ db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
+ 'system_metadata': system_metadata})
+ # Retrieve the user-provided metadata to ensure it was successfully
+ # updated
+ self.assertEqual(metadata,
+ db.instance_metadata_get(self.ctxt, instance['uuid']))
+ self.assertEqual(system_metadata,
+ db.instance_system_metadata_get(self.ctxt, instance['uuid']))
+
+ def test_instance_update_bad_str_dates(self):
+ instance = self.create_instance_with_args()
+ values = {'created_at': '123'}
+ self.assertRaises(ValueError,
+ db.instance_update,
+ self.ctxt, instance['uuid'], values)
+
+ def test_instance_update_good_str_dates(self):
+ instance = self.create_instance_with_args()
+ values = {'created_at': '2011-01-31T00:00:00.0'}
+ actual = db.instance_update(self.ctxt, instance['uuid'], values)
+ expected = datetime.datetime(2011, 1, 31)
+ self.assertEqual(expected, actual["created_at"])
+
+ def test_create_instance_unique_hostname(self):
+ context1 = context.RequestContext('user1', 'p1')
+ context2 = context.RequestContext('user2', 'p2')
+ self.create_instance_with_args(hostname='h1', project_id='p1')
+
+ # With scope 'global' any duplicate should fail, be it this project:
+ self.flags(osapi_compute_unique_server_name_scope='global')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context1,
+ hostname='h1', project_id='p3')
+ # or another:
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context2,
+ hostname='h1', project_id='p2')
+ # With scope 'project' a duplicate in the project should fail:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context1,
+ hostname='h1', project_id='p1')
+ # With scope 'project' a duplicate in a different project should work:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.create_instance_with_args(context=context2, hostname='h2')
+ self.flags(osapi_compute_unique_server_name_scope=None)
+
+ def test_instance_get_all_by_filters_with_meta(self):
+ inst = self.create_instance_with_args()
+ for inst in db.instance_get_all_by_filters(self.ctxt, {}):
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_get_all_by_filters_without_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt, {},
+ columns_to_join=[])
+ for inst in result:
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_all_by_filters(self):
+ instances = [self.create_instance_with_args() for i in range(3)]
+ filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
+ self._assertEqualListsOfInstances(instances, filtered_instances)
+
+ def test_instance_get_all_by_filters_zero_limit(self):
+ self.create_instance_with_args()
+ instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
+ self.assertEqual([], instances)
+
+ def test_instance_metadata_get_multi(self):
+ uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
+ meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
+ for row in meta:
+ self.assertIn(row['instance_uuid'], uuids)
+
+ def test_instance_metadata_get_multi_no_uuids(self):
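+        # Stub out Query.filter with no expected calls so mox fails the test
+        # if a query is built for an empty uuid list.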
+ self.mox.StubOutWithMock(query.Query, 'filter')
+ self.mox.ReplayAll()
+ sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
+
+    def test_instance_system_metadata_get_multi(self):
+ uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
+ sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
+ self.ctxt, uuids)
+ for row in sys_meta:
+ self.assertIn(row['instance_uuid'], uuids)
+
+ def test_instance_system_metadata_get_multi_no_uuids(self):
+ self.mox.StubOutWithMock(query.Query, 'filter')
+ self.mox.ReplayAll()
+ sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
+
+ def test_instance_get_all_by_filters_regex(self):
+ i1 = self.create_instance_with_args(display_name='test1')
+ i2 = self.create_instance_with_args(display_name='teeeest2')
+ self.create_instance_with_args(display_name='diff')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'display_name': 't.*st.'})
+ self._assertEqualListsOfInstances(result, [i1, i2])
+
+ def test_instance_get_all_by_filters_changes_since(self):
+        i1 = self.create_instance_with_args(
+            updated_at='2013-12-05T15:03:25.000000')
+        i2 = self.create_instance_with_args(
+            updated_at='2013-12-05T15:03:26.000000')
+ changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'changes-since':
+ changes_since})
+ self._assertEqualListsOfInstances([i1, i2], result)
+
+ changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'changes-since':
+ changes_since})
+ self._assertEqualListsOfInstances([i2], result)
+
+ def test_instance_get_all_by_filters_exact_match(self):
+ instance = self.create_instance_with_args(host='host1')
+ self.create_instance_with_args(host='host12')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'host': 'host1'})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_metadata(self):
+ instance = self.create_instance_with_args(metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'metadata': {'foo': 'bar'}})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_system_metadata(self):
+ instance = self.create_instance_with_args(
+ system_metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_unicode_value(self):
+ instance = self.create_instance_with_args(display_name=u'test♥')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'display_name': u'test'})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_tags(self):
+ instance = self.create_instance_with_args(
+ metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ # For format 'tag-'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-key', 'value': 'foo'},
+ {'name': 'tag-value', 'value': 'bar'},
+ ]})
+ self._assertEqualListsOfInstances([instance], result)
+ # For format 'tag:'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'bar'},
+ ]})
+ self._assertEqualListsOfInstances([instance], result)
+ # For non-existent tag
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'barred'},
+ ]})
+ self.assertEqual([], result)
+
+        # Confirm the filters no longer match once the tag is deleted
+ db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
+ # For format 'tag-'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-key', 'value': 'foo'},
+ ]})
+ self.assertEqual([], result)
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-value', 'value': 'bar'}
+ ]})
+ self.assertEqual([], result)
+ # For format 'tag:'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'bar'},
+ ]})
+ self.assertEqual([], result)
+
+ def test_instance_get_by_uuid(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
+ self._assertEqualInstances(inst, result)
+
+ def test_instance_get_by_uuid_join_empty(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=[])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_by_uuid_join_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=['metadata'])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_by_uuid_join_sys_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=['system_metadata'])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_get_all_by_filters_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(reservation_id='b')
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt, {})
+ self._assertEqualListsOfObjects([inst1, inst2], result,
+ ignored_keys=['metadata', 'system_metadata',
+ 'deleted', 'deleted_at', 'info_cache',
+ 'pci_devices'])
+
+ def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': True})
+ self._assertEqualListsOfObjects([inst1, inst2], result,
+ ignored_keys=['metadata', 'system_metadata',
+ 'deleted', 'deleted_at', 'info_cache',
+ 'pci_devices'])
+
+ def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': True,
+ 'soft_deleted': False})
+ self._assertEqualListsOfObjects([inst1], result,
+ ignored_keys=['deleted', 'deleted_at', 'metadata',
+ 'system_metadata', 'info_cache', 'pci_devices'])
+
+ def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ inst3 = self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': False,
+ 'soft_deleted': True})
+ self._assertEqualListsOfInstances([inst2, inst3], result)
+
+ def test_instance_get_all_by_filters_not_deleted(self):
+ inst1 = self.create_instance_with_args()
+ self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ inst3 = self.create_instance_with_args()
+ inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': False})
+ self.assertIsNone(inst3.vm_state)
+ self._assertEqualListsOfInstances([inst3, inst4], result)
+
+ def test_instance_get_all_by_filters_cleaned(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(reservation_id='b')
+ db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
+ result = db.instance_get_all_by_filters(self.ctxt, {})
+ self.assertEqual(2, len(result))
+ self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
+ self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
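+        # The result order is not guaranteed, so branch on which instance
+        # came back first.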
+ if inst1['uuid'] == result[0]['uuid']:
+ self.assertTrue(result[0]['cleaned'])
+ self.assertFalse(result[1]['cleaned'])
+ else:
+ self.assertTrue(result[1]['cleaned'])
+ self.assertFalse(result[0]['cleaned'])
+
+ def test_instance_get_all_by_host_and_node_no_join(self):
+ instance = self.create_instance_with_args()
+ result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
+ self.assertEqual(result[0]['uuid'], instance['uuid'])
+ self.assertEqual(result[0]['system_metadata'], [])
+
+ def test_instance_get_all_hung_in_rebooting(self):
+ # Ensure no instances are returned.
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self.assertEqual([], results)
+
+ # Ensure one rebooting instance with updated_at older than 10 seconds
+ # is returned.
+ instance = self.create_instance_with_args(task_state="rebooting",
+ updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self._assertEqualListsOfObjects([instance], results,
+ ignored_keys=['task_state', 'info_cache', 'security_groups',
+ 'metadata', 'system_metadata', 'pci_devices'])
+ db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
+
+ # Ensure the newly rebooted instance is not returned.
+ instance = self.create_instance_with_args(task_state="rebooting",
+ updated_at=timeutils.utcnow())
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self.assertEqual([], results)
+
+ def test_instance_update_with_expected_vm_state(self):
+ instance = self.create_instance_with_args(vm_state='foo')
+ db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
+ 'expected_vm_state': ('foo', 'bar')})
+
+ def test_instance_update_with_unexpected_vm_state(self):
+ instance = self.create_instance_with_args(vm_state='foo')
+ self.assertRaises(exception.UnexpectedVMStateError,
+ db.instance_update, self.ctxt, instance['uuid'],
+ {'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
+
+ def test_instance_update_with_instance_uuid(self):
+ # test instance_update() works when an instance UUID is passed.
+ ctxt = context.get_admin_context()
+
+ # Create an instance with some metadata
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
+ 'system_metadata': {'original_image_ref': 'blah'}}
+ instance = db.instance_create(ctxt, values)
+
+ # Update the metadata
+ values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
+ 'system_metadata': {'original_image_ref': 'baz'}}
+ db.instance_update(ctxt, instance['uuid'], values)
+
+ # Retrieve the user-provided metadata to ensure it was successfully
+ # updated
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('bar', instance_meta['host'])
+ self.assertEqual('wuff', instance_meta['key2'])
+ self.assertNotIn('key1', instance_meta)
+
+ # Retrieve the system metadata to ensure it was successfully updated
+ system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('baz', system_meta['original_image_ref'])
+
+ def test_delete_instance_metadata_on_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ # Create an instance with some metadata
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
+ 'system_metadata': {'original_image_ref': 'blah'}}
+ instance = db.instance_create(ctxt, values)
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('foo', instance_meta['host'])
+ self.assertEqual('meow', instance_meta['key1'])
+ db.instance_destroy(ctxt, instance['uuid'])
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ # Make sure instance metadata is deleted as well
+ self.assertEqual({}, instance_meta)
+
+ def test_delete_instance_faults_on_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+ # Create faults
+ db.instance_create(ctxt, {'uuid': uuid})
+
+ fault_values = {
+ 'message': 'message',
+ 'details': 'detail',
+ 'instance_uuid': uuid,
+ 'code': 404,
+ 'host': 'localhost'
+ }
+ fault = db.instance_fault_create(ctxt, fault_values)
+
+ # Retrieve the fault to ensure it was successfully added
+ faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
+ self.assertEqual(1, len(faults[uuid]))
+ self._assertEqualObjects(fault, faults[uuid][0])
+ db.instance_destroy(ctxt, uuid)
+ faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
+        # Make sure the instance faults are deleted as well
+ self.assertEqual(0, len(faults[uuid]))
+
+ def test_instance_update_with_and_get_original(self):
+ instance = self.create_instance_with_args(vm_state='building')
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
+ instance['uuid'], {'vm_state': 'needscoffee'})
+ self.assertEqual('building', old_ref['vm_state'])
+ self.assertEqual('needscoffee', new_ref['vm_state'])
+
+ def test_instance_update_and_get_original_metadata(self):
+ instance = self.create_instance_with_args()
+ columns_to_join = ['metadata']
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
+ columns_to_join=columns_to_join)
+ meta = utils.metadata_to_dict(new_ref['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_update_and_get_original_metadata_none_join(self):
+ instance = self.create_instance_with_args()
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
+ meta = utils.metadata_to_dict(new_ref['metadata'])
+ self.assertEqual(meta, {'mk1': 'mv3'})
+
+ def test_instance_update_unique_name(self):
+ context1 = context.RequestContext('user1', 'p1')
+ context2 = context.RequestContext('user2', 'p2')
+
+ inst1 = self.create_instance_with_args(context=context1,
+ project_id='p1',
+ hostname='fake_name1')
+ inst2 = self.create_instance_with_args(context=context1,
+ project_id='p1',
+ hostname='fake_name2')
+ inst3 = self.create_instance_with_args(context=context2,
+ project_id='p2',
+ hostname='fake_name3')
+ # osapi_compute_unique_server_name_scope is unset so this should work:
+ db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
+ db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
+
+ # With scope 'global' any duplicate should fail.
+ self.flags(osapi_compute_unique_server_name_scope='global')
+ self.assertRaises(exception.InstanceExists,
+ db.instance_update,
+ context1,
+ inst2['uuid'],
+ {'hostname': 'fake_name1'})
+ self.assertRaises(exception.InstanceExists,
+ db.instance_update,
+ context2,
+ inst3['uuid'],
+ {'hostname': 'fake_name1'})
+ # But we should definitely be able to update our name if we aren't
+ # really changing it.
+ db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
+
+ # With scope 'project' a duplicate in the project should fail:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.assertRaises(exception.InstanceExists, db.instance_update,
+ context1, inst2['uuid'], {'hostname': 'fake_NAME'})
+
+ # With scope 'project' a duplicate in a different project should work:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
+
+ def _test_instance_update_updates_metadata(self, metadata_type):
+ instance = self.create_instance_with_args()
+
+ def set_and_check(meta):
+ inst = db.instance_update(self.ctxt, instance['uuid'],
+ {metadata_type: dict(meta)})
+ _meta = utils.metadata_to_dict(inst[metadata_type])
+ self.assertEqual(meta, _meta)
+
+ meta = {'speed': '88', 'units': 'MPH'}
+ set_and_check(meta)
+ meta['gigawatts'] = '1.21'
+ set_and_check(meta)
+ del meta['gigawatts']
+ set_and_check(meta)
+
+ def test_security_group_in_use(self):
+ db.instance_create(self.ctxt, dict(host='foo'))
+
+ def test_instance_update_updates_system_metadata(self):
+ # Ensure that system_metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('system_metadata')
+
+ def test_instance_update_updates_metadata(self):
+ # Ensure that metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('metadata')
+
+ def test_instance_floating_address_get_all(self):
+ ctxt = context.get_admin_context()
+
+ instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
+ instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
+
+ fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
+ instance_uuids = [instance1['uuid'], instance1['uuid'],
+ instance2['uuid']]
+
+ for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
+ float_addresses,
+ instance_uuids):
+ db.fixed_ip_create(ctxt, {'address': fixed_addr,
+ 'instance_uuid': instance_uuid})
+ fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
+ db.floating_ip_create(ctxt,
+ {'address': float_addr,
+ 'fixed_ip_id': fixed_id})
+
+ real_float_addresses = \
+ db.instance_floating_address_get_all(ctxt, instance_uuids[0])
+ self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
+ real_float_addresses = \
+ db.instance_floating_address_get_all(ctxt, instance_uuids[2])
+ self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
+
+ self.assertRaises(exception.InvalidUUID,
+ db.instance_floating_address_get_all,
+ ctxt, 'invalid_uuid')
+
+ def test_instance_stringified_ips(self):
+ instance = self.create_instance_with_args()
+ instance = db.instance_update(
+ self.ctxt, instance['uuid'],
+ {'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1')})
+ self.assertIsInstance(instance['access_ip_v4'], six.string_types)
+ self.assertIsInstance(instance['access_ip_v6'], six.string_types)
+ instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
+ self.assertIsInstance(instance['access_ip_v4'], six.string_types)
+ self.assertIsInstance(instance['access_ip_v6'], six.string_types)
+
+ def test_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ values = {
+ 'metadata': {'key': 'value'}
+ }
+ inst_uuid = self.create_instance_with_args(**values)['uuid']
+ db.instance_destroy(ctxt, inst_uuid)
+
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_get, ctxt, inst_uuid)
+ self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
+ self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
+
+ def test_instance_destroy_already_destroyed(self):
+ ctxt = context.get_admin_context()
+ instance = self.create_instance_with_args()
+ db.instance_destroy(ctxt, instance['uuid'])
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_destroy, ctxt, instance['uuid'])
+
+
+class InstanceMetadataTestCase(test.TestCase):
+
+ """Tests for db.api.instance_metadata_* methods."""
+
+ def setUp(self):
+ super(InstanceMetadataTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def test_instance_metadata_get(self):
+ instance = db.instance_create(self.ctxt, {'metadata':
+ {'key': 'value'}})
+ self.assertEqual({'key': 'value'}, db.instance_metadata_get(
+ self.ctxt, instance['uuid']))
+
+ def test_instance_metadata_delete(self):
+ instance = db.instance_create(self.ctxt,
+ {'metadata': {'key': 'val',
+ 'key1': 'val1'}})
+ db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
+ self.assertEqual({'key': 'val'}, db.instance_metadata_get(
+ self.ctxt, instance['uuid']))
+
+ def test_instance_metadata_update(self):
+ instance = db.instance_create(self.ctxt, {'host': 'h1',
+ 'project_id': 'p1', 'metadata': {'key': 'value'}})
+
+        # This should add a new key/value pair
+ metadata = db.instance_metadata_update(
+ self.ctxt, instance['uuid'],
+ {'new_key': 'new_value'}, False)
+ metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
+ self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
+
+ # This should leave only one key/value pair
+ metadata = db.instance_metadata_update(
+ self.ctxt, instance['uuid'],
+ {'new_key': 'new_value'}, True)
+ metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
+ self.assertEqual(metadata, {'new_key': 'new_value'})
+
+
+class InstanceExtraTestCase(test.TestCase):
+ def setUp(self):
+ super(InstanceExtraTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.instance = db.instance_create(self.ctxt, {})
+
+ def test_instance_extra_get_by_uuid_instance_create(self):
+ inst_extra = db.instance_extra_get_by_instance_uuid(
+ self.ctxt, self.instance['uuid'])
+ self.assertIsNotNone(inst_extra)
+
+ def test_instance_extra_update_by_uuid(self):
+ db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
+ {'numa_topology': 'changed'})
+ inst_extra = db.instance_extra_get_by_instance_uuid(
+ self.ctxt, self.instance['uuid'])
+ self.assertEqual('changed', inst_extra.numa_topology)
+
+ def test_instance_extra_get_with_columns(self):
+ extra = db.instance_extra_get_by_instance_uuid(
+ self.ctxt, self.instance['uuid'],
+ columns=['numa_topology'])
+ self.assertNotIn('pci_requests', extra)
+ self.assertIn('numa_topology', extra)
+
+
+class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(ServiceTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'host': 'fake_host',
+ 'binary': 'fake_binary',
+ 'topic': 'fake_topic',
+ 'report_count': 3,
+ 'disabled': False
+ }
+
+ def _create_service(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.service_create(self.ctxt, v)
+
+ def test_service_create(self):
+ service = self._create_service({})
+ self.assertIsNotNone(service['id'])
+ for key, value in self._get_base_values().iteritems():
+ self.assertEqual(value, service[key])
+
+ def test_service_destroy(self):
+ service1 = self._create_service({})
+ service2 = self._create_service({'host': 'fake_host2'})
+
+ db.service_destroy(self.ctxt, service1['id'])
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_get, self.ctxt, service1['id'])
+ self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
+ service2, ignored_keys=['compute_node'])
+
+ def test_service_update(self):
+ service = self._create_service({})
+ new_values = {
+ 'host': 'fake_host1',
+ 'binary': 'fake_binary1',
+ 'topic': 'fake_topic1',
+ 'report_count': 4,
+ 'disabled': True
+ }
+ db.service_update(self.ctxt, service['id'], new_values)
+ updated_service = db.service_get(self.ctxt, service['id'])
+ for key, value in new_values.iteritems():
+ self.assertEqual(value, updated_service[key])
+
+ def test_service_update_not_found_exception(self):
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_update, self.ctxt, 100500, {})
+
+ def test_service_get(self):
+ service1 = self._create_service({})
+ self._create_service({'host': 'some_other_fake_host'})
+ real_service1 = db.service_get(self.ctxt, service1['id'])
+ self._assertEqualObjects(service1, real_service1,
+ ignored_keys=['compute_node'])
+
+ def test_service_get_with_compute_node(self):
+ service = self._create_service({})
+ compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
+ vcpus_used=0, memory_mb_used=0,
+ local_gb_used=0, free_ram_mb=1024,
+ free_disk_gb=2048, hypervisor_type="xen",
+ hypervisor_version=1, cpu_info="",
+ running_vms=0, current_workload=0,
+ service_id=service['id'])
+ compute = db.compute_node_create(self.ctxt, compute_values)
+ real_service = db.service_get(self.ctxt, service['id'],
+ with_compute_node=True)
+ real_compute = real_service['compute_node'][0]
+ self.assertEqual(compute['id'], real_compute['id'])
+
+ def test_service_get_not_found_exception(self):
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_get, self.ctxt, 100500)
+
+ def test_service_get_by_host_and_topic(self):
+ service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
+ self._create_service({'host': 'host2', 'topic': 'topic2'})
+
+ real_service1 = db.service_get_by_host_and_topic(self.ctxt,
+ host='host1',
+ topic='topic1')
+ self._assertEqualObjects(service1, real_service1)
+
+ def test_service_get_all(self):
+ values = [
+ {'host': 'host1', 'topic': 'topic1'},
+ {'host': 'host2', 'topic': 'topic2'},
+ {'disabled': True}
+ ]
+ services = [self._create_service(vals) for vals in values]
+ disabled_services = [services[-1]]
+ non_disabled_services = services[:-1]
+
+ compares = [
+ (services, db.service_get_all(self.ctxt)),
+ (disabled_services, db.service_get_all(self.ctxt, True)),
+ (non_disabled_services, db.service_get_all(self.ctxt, False))
+ ]
+ for comp in compares:
+ self._assertEqualListsOfObjects(*comp)
+
+ def test_service_get_all_by_topic(self):
+ values = [
+ {'host': 'host1', 'topic': 't1'},
+ {'host': 'host2', 'topic': 't1'},
+ {'disabled': True, 'topic': 't1'},
+ {'host': 'host3', 'topic': 't2'}
+ ]
+ services = [self._create_service(vals) for vals in values]
+ expected = services[:2]
+ real = db.service_get_all_by_topic(self.ctxt, 't1')
+ self._assertEqualListsOfObjects(expected, real)
+
+ def test_service_get_all_by_host(self):
+ values = [
+ {'host': 'host1', 'topic': 't11', 'binary': 'b11'},
+ {'host': 'host1', 'topic': 't12', 'binary': 'b12'},
+ {'host': 'host2', 'topic': 't1'},
+ {'host': 'host3', 'topic': 't1'}
+ ]
+ services = [self._create_service(vals) for vals in values]
+
+ expected = services[:2]
+ real = db.service_get_all_by_host(self.ctxt, 'host1')
+ self._assertEqualListsOfObjects(expected, real)
+
+ def test_service_get_by_compute_host(self):
+ values = [
+ {'host': 'host1', 'topic': CONF.compute_topic},
+ {'host': 'host2', 'topic': 't1'},
+ {'host': 'host3', 'topic': CONF.compute_topic}
+ ]
+ services = [self._create_service(vals) for vals in values]
+
+ real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
+ self._assertEqualObjects(services[0], real_service,
+ ignored_keys=['compute_node'])
+
+ self.assertRaises(exception.ComputeHostNotFound,
+ db.service_get_by_compute_host,
+ self.ctxt, 'non-exists-host')
+
+ def test_service_get_by_compute_host_not_found(self):
+ self.assertRaises(exception.ComputeHostNotFound,
+ db.service_get_by_compute_host,
+ self.ctxt, 'non-exists-host')
+
+ def test_service_get_by_args(self):
+ values = [
+ {'host': 'host1', 'binary': 'a'},
+ {'host': 'host2', 'binary': 'b'}
+ ]
+ services = [self._create_service(vals) for vals in values]
+
+ service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
+ self._assertEqualObjects(services[0], service1)
+
+ service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
+ self._assertEqualObjects(services[1], service2)
+
+ def test_service_get_by_args_not_found_exception(self):
+ self.assertRaises(exception.HostBinaryNotFound,
+ db.service_get_by_args,
+ self.ctxt, 'non-exists-host', 'a')
+
+ def test_service_binary_exists_exception(self):
+ db.service_create(self.ctxt, self._get_base_values())
+ values = self._get_base_values()
+ values.update({'topic': 'top1'})
+ self.assertRaises(exception.ServiceBinaryExists, db.service_create,
+ self.ctxt, values)
+
+ def test_service_topic_exists_exceptions(self):
+ db.service_create(self.ctxt, self._get_base_values())
+ values = self._get_base_values()
+ values.update({'binary': 'bin1'})
+ self.assertRaises(exception.ServiceTopicExists, db.service_create,
+ self.ctxt, values)
+
+
+class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(BaseInstanceTypeTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.user_ctxt = context.RequestContext('user', 'user')
+
+ def _get_base_values(self):
+ return {
+ 'name': 'fake_name',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 10,
+ 'ephemeral_gb': 10,
+ 'flavorid': 'fake_flavor',
+ 'swap': 0,
+ 'rxtx_factor': 0.5,
+ 'vcpu_weight': 1,
+ 'disabled': False,
+ 'is_public': True
+ }
+
+ def _create_flavor(self, values, projects=None):
+ v = self._get_base_values()
+ v.update(values)
+ return db.flavor_create(self.ctxt, v, projects)
+
+
+class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ IGNORED_FIELDS = [
+ 'id',
+ 'created_at',
+ 'updated_at',
+ 'deleted_at',
+ 'deleted'
+ ]
+
+ def setUp(self):
+ super(InstanceActionTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _create_action_values(self, uuid, action='run_instance',
+ ctxt=None, extra=None):
+ if ctxt is None:
+ ctxt = self.ctxt
+
+ db.instance_create(ctxt, {'uuid': uuid})
+
+ values = {
+ 'action': action,
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': timeutils.utcnow(),
+ 'message': 'action-message'
+ }
+ if extra is not None:
+ values.update(extra)
+ return values
+
+ def _create_event_values(self, uuid, event='schedule',
+ ctxt=None, extra=None):
+ if ctxt is None:
+ ctxt = self.ctxt
+ values = {
+ 'event': event,
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'start_time': timeutils.utcnow(),
+ 'host': 'fake-host',
+ 'details': 'fake-details',
+ }
+ if extra is not None:
+ values.update(extra)
+ return values
+
+ def _assertActionSaved(self, action, uuid):
+ """Retrieve the action to ensure it was successfully added."""
+ actions = db.actions_get(self.ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self._assertEqualObjects(action, actions[0])
+
+ def _assertActionEventSaved(self, event, action_id):
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(self.ctxt, action_id)
+ self.assertEqual(1, len(events))
+ self._assertEqualObjects(event, events[0],
+ ['instance_uuid', 'request_id'])
+
+ def test_instance_action_start(self):
+ """Create an instance action."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action_values = self._create_action_values(uuid)
+ action = db.action_start(self.ctxt, action_values)
+
+ ignored_keys = self.IGNORED_FIELDS + ['finish_time']
+ self._assertEqualObjects(action_values, action, ignored_keys)
+
+ self._assertActionSaved(action, uuid)
+
+ def test_instance_action_finish(self):
+ """Create an instance action."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action_values = self._create_action_values(uuid)
+ db.action_start(self.ctxt, action_values)
+
+ action_values['finish_time'] = timeutils.utcnow()
+ action = db.action_finish(self.ctxt, action_values)
+ self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
+
+ self._assertActionSaved(action, uuid)
+
+ def test_instance_action_finish_without_started_event(self):
+ """Create an instance finish action."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action_values = self._create_action_values(uuid)
+ action_values['finish_time'] = timeutils.utcnow()
+ self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
+ self.ctxt, action_values)
+
+ def test_instance_actions_get_by_instance(self):
+ """Ensure we can get actions by UUID."""
+ uuid1 = str(stdlib_uuid.uuid4())
+
+ expected = []
+
+ action_values = self._create_action_values(uuid1)
+ action = db.action_start(self.ctxt, action_values)
+ expected.append(action)
+
+ action_values['action'] = 'resize'
+ action = db.action_start(self.ctxt, action_values)
+ expected.append(action)
+
+ # Create some extra actions
+ uuid2 = str(stdlib_uuid.uuid4())
+ ctxt2 = context.get_admin_context()
+ action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(self.ctxt, uuid1)
+ self._assertEqualListsOfObjects(expected, actions)
+
+ def test_instance_actions_get_are_in_order(self):
+ """Ensure retrived actions are in order."""
+ uuid1 = str(stdlib_uuid.uuid4())
+
+ extra = {
+ 'created_at': timeutils.utcnow()
+ }
+
+ action_values = self._create_action_values(uuid1, extra=extra)
+ action1 = db.action_start(self.ctxt, action_values)
+
+ action_values['action'] = 'delete'
+ action2 = db.action_start(self.ctxt, action_values)
+
+ actions = db.actions_get(self.ctxt, uuid1)
+ self.assertEqual(2, len(actions))
+
+ self._assertEqualOrderedListOfObjects([action2, action1], actions)
+
+ def test_instance_action_get_by_instance_and_action(self):
+ """Ensure we can get an action by instance UUID and action id."""
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = self._create_action_values(uuid1)
+ db.action_start(self.ctxt, action_values)
+ request_id = action_values['request_id']
+
+ # NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
+ action_values['action'] = 'resize'
+ action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
+ db.action_start(self.ctxt, action_values)
+
+ action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
+ self.assertEqual('run_instance', action['action'])
+ self.assertEqual(self.ctxt.request_id, action['request_id'])
+
+ def test_instance_action_event_start(self):
+ """Create an instance action event."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action_values = self._create_action_values(uuid)
+ action = db.action_start(self.ctxt, action_values)
+
+ event_values = self._create_event_values(uuid)
+ event = db.action_event_start(self.ctxt, event_values)
+ event_values['action_id'] = action['id']
+ ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
+ self._assertEqualObjects(event_values, event, ignored)
+
+ self._assertActionEventSaved(event, action['id'])
+
+ def test_instance_action_event_start_without_action(self):
+ """Create an instance action event."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ event_values = self._create_event_values(uuid)
+ self.assertRaises(exception.InstanceActionNotFound,
+ db.action_event_start, self.ctxt, event_values)
+
+ def test_instance_action_event_finish_without_started_event(self):
+ """Finish an instance action event."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ db.action_start(self.ctxt, self._create_action_values(uuid))
+
+ event_values = {
+ 'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
+ 'result': 'Success'
+ }
+ event_values = self._create_event_values(uuid, extra=event_values)
+ self.assertRaises(exception.InstanceActionEventNotFound,
+ db.action_event_finish, self.ctxt, event_values)
+
+ def test_instance_action_event_finish_without_action(self):
+ """Finish an instance action event."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ event_values = {
+ 'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
+ 'result': 'Success'
+ }
+ event_values = self._create_event_values(uuid, extra=event_values)
+ self.assertRaises(exception.InstanceActionNotFound,
+ db.action_event_finish, self.ctxt, event_values)
+
+ def test_instance_action_event_finish_success(self):
+ """Finish an instance action event."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action = db.action_start(self.ctxt, self._create_action_values(uuid))
+
+ db.action_event_start(self.ctxt, self._create_event_values(uuid))
+
+ event_values = {
+ 'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
+ 'result': 'Success'
+ }
+ event_values = self._create_event_values(uuid, extra=event_values)
+ event = db.action_event_finish(self.ctxt, event_values)
+
+ self._assertActionEventSaved(event, action['id'])
+ action = db.action_get_by_request_id(self.ctxt, uuid,
+ self.ctxt.request_id)
+ self.assertNotEqual('Error', action['message'])
+
+ def test_instance_action_event_finish_error(self):
+ """Finish an instance action event with an error."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action = db.action_start(self.ctxt, self._create_action_values(uuid))
+
+ db.action_event_start(self.ctxt, self._create_event_values(uuid))
+
+ event_values = {
+ 'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
+ 'result': 'Error'
+ }
+ event_values = self._create_event_values(uuid, extra=event_values)
+ event = db.action_event_finish(self.ctxt, event_values)
+
+ self._assertActionEventSaved(event, action['id'])
+ action = db.action_get_by_request_id(self.ctxt, uuid,
+ self.ctxt.request_id)
+ self.assertEqual('Error', action['message'])
+
+ def test_instance_action_and_event_start_string_time(self):
+ """Create an instance action and event with a string start_time."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ action = db.action_start(self.ctxt, self._create_action_values(uuid))
+
+ event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
+ event_values = self._create_event_values(uuid, extra=event_values)
+ event = db.action_event_start(self.ctxt, event_values)
+
+ self._assertActionEventSaved(event, action['id'])
+
+ def test_instance_action_events_get_are_in_order(self):
+ """Ensure retrived action events are in order."""
+ uuid1 = str(stdlib_uuid.uuid4())
+
+ action = db.action_start(self.ctxt,
+ self._create_action_values(uuid1))
+
+ extra1 = {
+ 'created_at': timeutils.utcnow()
+ }
+ extra2 = {
+ 'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
+ }
+
+ event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
+ event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
+ event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)
+
+ event1 = db.action_event_start(self.ctxt, event_val1)
+ event2 = db.action_event_start(self.ctxt, event_val2)
+ event3 = db.action_event_start(self.ctxt, event_val3)
+
+ events = db.action_events_get(self.ctxt, action['id'])
+ self.assertEqual(3, len(events))
+
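+ # The most recently created event is expected first.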
+ self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
+ ['instance_uuid', 'request_id'])
+
+ def test_instance_action_event_get_by_id(self):
+ """Get a specific instance action event."""
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action = db.action_start(self.ctxt,
+ self._create_action_values(uuid1))
+
+ db.action_start(ctxt2,
+ self._create_action_values(uuid2, 'reboot', ctxt2))
+
+ event = db.action_event_start(self.ctxt,
+ self._create_event_values(uuid1))
+
+ event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
+ db.action_event_start(ctxt2, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ saved_event = db.action_event_get_by_id(self.ctxt,
+ action['id'],
+ event['id'])
+ self._assertEqualObjects(event, saved_event,
+ ['instance_uuid', 'request_id'])
+
+
+class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(InstanceFaultTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _create_fault_values(self, uuid, code=404):
+ return {
+ 'message': 'message',
+ 'details': 'detail',
+ 'instance_uuid': uuid,
+ 'code': code,
+ 'host': 'localhost'
+ }
+
+ def test_instance_fault_create(self):
+ """Ensure we can create an instance fault."""
+ uuid = str(stdlib_uuid.uuid4())
+
+ # Ensure no faults registered for this instance
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ self.assertEqual(0, len(faults[uuid]))
+
+ # Create a fault
+ fault_values = self._create_fault_values(uuid)
+ db.instance_create(self.ctxt, {'uuid': uuid})
+ fault = db.instance_fault_create(self.ctxt, fault_values)
+
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id']
+ self._assertEqualObjects(fault_values, fault, ignored_keys)
+
+ # Retrieve the fault to ensure it was successfully added
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ self.assertEqual(1, len(faults[uuid]))
+ self._assertEqualObjects(fault, faults[uuid][0])
+
+ def test_instance_fault_get_by_instance(self):
+ """Ensure we can retrieve faults for instance."""
+ uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
+ fault_codes = [404, 500]
+ expected = {}
+
+ # Create faults
+ for uuid in uuids:
+ db.instance_create(self.ctxt, {'uuid': uuid})
+
+ expected[uuid] = []
+ for code in fault_codes:
+ fault_values = self._create_fault_values(uuid, code)
+ fault = db.instance_fault_create(self.ctxt, fault_values)
+ expected[uuid].append(fault)
+
+ # Ensure faults are saved
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
+ self.assertEqual(len(expected), len(faults))
+ for uuid in uuids:
+ self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
+
+ def test_instance_faults_get_by_instance_uuids_no_faults(self):
+ uuid = str(stdlib_uuid.uuid4())
+ # An empty list should be returned for an instance with no faults.
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
+ expected = {uuid: []}
+ self.assertEqual(expected, faults)
+
+ def test_instance_faults_get_by_instance_uuids_no_uuids(self):
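+ # Stub Query.filter with no expected calls: the DB API should not hit the
+ # database at all when the uuid list is empty.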
+ self.mox.StubOutWithMock(query.Query, 'filter')
+ self.mox.ReplayAll()
+ faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
+ self.assertEqual({}, faults)
+
+
+class InstanceTypeTestCase(BaseInstanceTypeTestCase):
+
+ def test_flavor_create(self):
+ flavor = self._create_flavor({})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at', 'extra_specs']
+
+ self.assertIsNotNone(flavor['id'])
+ self._assertEqualObjects(flavor, self._get_base_values(),
+ ignored_keys)
+
+ def test_flavor_create_with_projects(self):
+ projects = ['fake-project1', 'fake-project2']
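+ # The duplicate project id below should not create a duplicate access entry.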
+ flavor = self._create_flavor({}, projects + ['fake-project2'])
+ access = db.flavor_access_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'])
+ self.assertEqual(projects, [x.project_id for x in access])
+
+ def test_flavor_destroy(self):
+ specs1 = {'a': '1', 'b': '2'}
+ flavor1 = self._create_flavor({'name': 'name1', 'flavorid': 'a1',
+ 'extra_specs': specs1})
+ specs2 = {'c': '4', 'd': '3'}
+ flavor2 = self._create_flavor({'name': 'name2', 'flavorid': 'a2',
+ 'extra_specs': specs2})
+
+ db.flavor_destroy(self.ctxt, 'name1')
+
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get, self.ctxt, flavor1['id'])
+ real_specs1 = db.flavor_extra_specs_get(self.ctxt, flavor1['flavorid'])
+ self._assertEqualObjects(real_specs1, {})
+
+ r_flavor2 = db.flavor_get(self.ctxt, flavor2['id'])
+ self._assertEqualObjects(flavor2, r_flavor2, 'extra_specs')
+
+ def test_flavor_destroy_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_destroy, self.ctxt, 'nonexists')
+
+ def test_flavor_create_duplicate_name(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorExists,
+ self._create_flavor,
+ {'flavorid': 'some_random_flavor'})
+
+ def test_flavor_create_duplicate_flavorid(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorIdExists,
+ self._create_flavor,
+ {'name': 'some_random_name'})
+
+ def test_flavor_create_with_extra_specs(self):
+ extra_specs = dict(a='abc', b='def', c='ghi')
+ flavor = self._create_flavor({'extra_specs': extra_specs})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at', 'extra_specs']
+
+ self._assertEqualObjects(flavor, self._get_base_values(),
+ ignored_keys)
+ self._assertEqualObjects(extra_specs, flavor['extra_specs'])
+
+ def test_flavor_get_all(self):
+ # NOTE(boris-42): Remove base instance types
+ for it in db.flavor_get_all(self.ctxt):
+ db.flavor_destroy(self.ctxt, it['name'])
+
+ flavors = [
+ {'root_gb': 600, 'memory_mb': 100, 'disabled': True,
+ 'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
+ {'root_gb': 500, 'memory_mb': 200, 'disabled': True,
+ 'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
+ {'root_gb': 400, 'memory_mb': 300, 'disabled': False,
+ 'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
+ {'root_gb': 300, 'memory_mb': 400, 'disabled': False,
+ 'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
+ {'root_gb': 200, 'memory_mb': 500, 'disabled': True,
+ 'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
+ {'root_gb': 100, 'memory_mb': 600, 'disabled': True,
+ 'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
+ ]
+ flavors = [self._create_flavor(it) for it in flavors]
+
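+ # Python-side equivalents of the DB API filters, used to compute the
+ # expected result set for each filter combination.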
+ lambda_filters = {
+ 'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
+ 'min_root_gb': lambda it, v: it['root_gb'] >= v,
+ 'disabled': lambda it, v: it['disabled'] == v,
+ 'is_public': lambda it, v: (v is None or it['is_public'] == v)
+ }
+
+ mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
+ root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
+ disabled_filts = [{'disabled': x} for x in [True, False]]
+ is_public_filts = [{'is_public': x} for x in [True, False, None]]
+
+ def assert_multi_filter_flavor_get(filters=None):
+ if filters is None:
+ filters = {}
+
+ expected_it = flavors
+ for name, value in filters.iteritems():
+ filt = lambda it: lambda_filters[name](it, value)
+ expected_it = filter(filt, expected_it)
+
+ real_it = db.flavor_get_all(self.ctxt, filters=filters)
+ self._assertEqualListsOfObjects(expected_it, real_it)
+
+ # no filter
+ assert_multi_filter_flavor_get()
+
+ # test only with one filter
+ for filt in mem_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in root_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in disabled_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in is_public_filts:
+ assert_multi_filter_flavor_get(filt)
+
+ # test all filters together
+ for mem in mem_filts:
+ for root in root_filts:
+ for disabled in disabled_filts:
+ for is_public in is_public_filts:
+ filts = [f.items() for f in
+ [mem, root, disabled, is_public]]
+ filts = dict(reduce(lambda x, y: x + y, filts, []))
+ assert_multi_filter_flavor_get(filts)
+
+ def test_flavor_get_all_limit_sort(self):
+ def assert_sorted_by_key_dir(sort_key, asc=True):
+ sort_dir = 'asc' if asc else 'desc'
+ results = db.flavor_get_all(self.ctxt, sort_key='name',
+ sort_dir=sort_dir)
+ # Manually sort the results as we would expect them
+ expected_results = sorted(results,
+ key=lambda item: item['name'],
+ reverse=(not asc))
+ self.assertEqual(expected_results, results)
+
+ def assert_sorted_by_key_both_dir(sort_key):
+ assert_sorted_by_key_dir(sort_key, True)
+ assert_sorted_by_key_dir(sort_key, False)
+
+ for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
+ 'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
+ 'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
+ 'vcpu_weight', 'id']:
+ assert_sorted_by_key_both_dir(attr)
+
+ def test_flavor_get_all_limit(self):
+ limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
+ self.assertEqual(2, len(limited_flavors))
+
+ def test_flavor_get_all_list_marker(self):
+ all_flavors = db.flavor_get_all(self.ctxt)
+
+ # Set the 3rd result as the marker
+ marker_flavorid = all_flavors[2]['flavorid']
+ marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
+ # We expect everything /after/ the 3rd result
+ expected_results = all_flavors[3:]
+ self.assertEqual(expected_results, marked_flavors)
+
+ def test_flavor_get_all_marker_not_found(self):
+ self.assertRaises(exception.MarkerNotFound,
+ db.flavor_get_all, self.ctxt, marker='invalid')
+
+ def test_flavor_get(self):
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ def test_flavor_get_non_public(self):
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFound, db.flavor_get,
+ self.user_ctxt, flavor['id'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_id = db.flavor_get(self.user_ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ def test_flavor_get_by_name(self):
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ def test_flavor_get_by_name_not_found(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorNotFoundByName,
+ db.flavor_get_by_name, self.ctxt, 'nonexists')
+
+ def test_flavor_get_by_name_non_public(self):
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFoundByName,
+ db.flavor_get_by_name, self.user_ctxt,
+ flavor['name'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_name = db.flavor_get_by_name(self.user_ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ def test_flavor_get_by_flavor_id(self):
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ params = (self.ctxt, flavor['flavorid'])
+ flavor_by_flavorid = db.flavor_get_by_flavor_id(*params)
+ self._assertEqualObjects(flavor, flavor_by_flavorid)
+
+ def test_flavor_get_by_flavor_not_found(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id,
+ self.ctxt, 'nonexists')
+
+ def test_flavor_get_by_flavor_id_non_public(self):
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'])
+ self._assertEqualObjects(flavor, flavor_by_fid)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id, self.user_ctxt,
+ flavor['flavorid'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
+ flavor['flavorid'])
+ self._assertEqualObjects(flavor, flavor_by_fid)
+
+ def test_flavor_get_by_flavor_id_deleted(self):
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123'})
+
+ db.flavor_destroy(self.ctxt, 'abc')
+
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'], read_deleted='yes')
+ self.assertEqual(flavor['id'], flavor_by_fid['id'])
+
+ def test_flavor_get_by_flavor_id_deleted_and_recreate(self):
+ # NOTE(wingwj): Aims to test difference between mysql and postgresql
+ # for bug 1288636
+ param_dict = {'name': 'abc', 'flavorid': '123'}
+
+ self._create_flavor(param_dict)
+ db.flavor_destroy(self.ctxt, 'abc')
+
+ # Recreate the flavor with the same params
+ flavor = self._create_flavor(param_dict)
+
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'], read_deleted='yes')
+ self.assertEqual(flavor['id'], flavor_by_fid['id'])
+
+
+class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
+
+ def setUp(self):
+ super(InstanceTypeExtraSpecsTestCase, self).setUp()
+ values = ({'name': 'n1', 'flavorid': 'f1',
+ 'extra_specs': dict(a='a', b='b', c='c')},
+ {'name': 'n2', 'flavorid': 'f2',
+ 'extra_specs': dict(d='d', e='e', f='f')})
+
+ # NOTE(boris-42): We have already tested flavor_create method
+ # with extra_specs in InstanceTypeTestCase.
+ self.flavors = [self._create_flavor(v) for v in values]
+
+ def test_flavor_extra_specs_get(self):
+ for it in self.flavors:
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(it['extra_specs'], real_specs)
+
+ def test_flavor_extra_specs_get_item(self):
+ expected = dict(f1=dict(a='a', b='b', c='c'),
+ f2=dict(d='d', e='e', f='f'))
+
+ for flavor, specs in expected.iteritems():
+ for key, val in specs.iteritems():
+ spec = db.flavor_extra_specs_get_item(self.ctxt, flavor, key)
+ self.assertEqual(spec[key], val)
+
+ def test_flavor_extra_specs_delete(self):
+ for it in self.flavors:
+ specs = it['extra_specs']
+ key = specs.keys()[0]
+ del specs[key]
+ db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(it['extra_specs'], real_specs)
+
+ def test_flavor_extra_specs_delete_failed(self):
+ for it in self.flavors:
+ self.assertRaises(exception.FlavorExtraSpecsNotFound,
+ db.flavor_extra_specs_delete,
+ self.ctxt, it['flavorid'], 'dummy')
+
+ def test_flavor_extra_specs_update_or_create(self):
+ for it in self.flavors:
+ current_specs = it['extra_specs']
+ current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
+ params = (self.ctxt, it['flavorid'], current_specs)
+ db.flavor_extra_specs_update_or_create(*params)
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(current_specs, real_specs)
+
+ def test_flavor_extra_specs_update_or_create_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_extra_specs_update_or_create,
+ self.ctxt, 'nonexists', {})
+
+ def test_flavor_extra_specs_update_or_create_retry(self):
+
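+ # Stub _flavor_get_id_from_flavor with a closure that counts its calls and
+ # always raises DBDuplicateEntry, so every retry fails and the retry limit
+ # (5 attempts) is exhausted.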
+ def counted():
+ def get_id(context, flavorid, session):
+ get_id.counter += 1
+ raise db_exc.DBDuplicateEntry
+ get_id.counter = 0
+ return get_id
+
+ get_id = counted()
+ self.stubs.Set(sqlalchemy_api, '_flavor_get_id_from_flavor', get_id)
+ self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
+ sqlalchemy_api.flavor_extra_specs_update_or_create,
+ self.ctxt, 1, {}, 5)
+ self.assertEqual(get_id.counter, 5)
+
+
+class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
+
+ def _create_flavor_access(self, flavor_id, project_id):
+ return db.flavor_access_add(self.ctxt, flavor_id, project_id)
+
+ def test_flavor_access_get_by_flavor_id(self):
+ flavors = ({'name': 'n1', 'flavorid': 'f1'},
+ {'name': 'n2', 'flavorid': 'f2'})
+ it1, it2 = tuple((self._create_flavor(v) for v in flavors))
+
+ access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
+ self._create_flavor_access(it1['flavorid'], 'pr2')]
+
+ access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
+
+ for it, access_it in zip((it1, it2), (access_it1, access_it2)):
+ params = (self.ctxt, it['flavorid'])
+ real_access_it = db.flavor_access_get_by_flavor_id(*params)
+ self._assertEqualListsOfObjects(access_it, real_access_it)
+
+ def test_flavor_access_get_by_flavor_id_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id,
+ self.ctxt, 'nonexists')
+
+ def test_flavor_access_add(self):
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ project_id = 'p1'
+
+ access = self._create_flavor_access(flavor['flavorid'], project_id)
+ # NOTE(boris-42): Check that flavor_access_add doesn't fail and
+ # returns correct value. This is enough because other
+ # logic is checked by other methods.
+ self.assertIsNotNone(access['id'])
+ self.assertEqual(access['instance_type_id'], flavor['id'])
+ self.assertEqual(access['project_id'], project_id)
+
+ def test_flavor_access_add_to_non_existing_flavor(self):
+ self.assertRaises(exception.FlavorNotFound,
+ self._create_flavor_access,
+ 'nonexists', 'does_not_matter')
+
+ def test_flavor_access_add_duplicate_project_id_flavor(self):
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ params = (flavor['flavorid'], 'p1')
+
+ self._create_flavor_access(*params)
+ self.assertRaises(exception.FlavorAccessExists,
+ self._create_flavor_access, *params)
+
+ def test_flavor_access_remove(self):
+ flavors = ({'name': 'n1', 'flavorid': 'f1'},
+ {'name': 'n2', 'flavorid': 'f2'})
+ it1, it2 = tuple((self._create_flavor(v) for v in flavors))
+
+ access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
+ self._create_flavor_access(it1['flavorid'], 'pr2')]
+
+ access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
+
+ db.flavor_access_remove(self.ctxt, it1['flavorid'],
+ access_it1[1]['project_id'])
+
+ for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
+ params = (self.ctxt, it['flavorid'])
+ real_access_it = db.flavor_access_get_by_flavor_id(*params)
+ self._assertEqualListsOfObjects(access_it, real_access_it)
+
+ def test_flavor_access_remove_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_access_remove,
+ self.ctxt, 'nonexists', 'does_not_matter')
+
+ def test_flavor_access_remove_access_not_found(self):
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ params = (flavor['flavorid'], 'p1')
+ self._create_flavor_access(*params)
+ self.assertRaises(exception.FlavorAccessNotFound,
+ db.flavor_access_remove,
+ self.ctxt, flavor['flavorid'], 'p2')
+
+ def test_flavor_access_removed_after_flavor_destroy(self):
+ flavor1 = self._create_flavor({'flavorid': 'f1', 'name': 'n1'})
+ flavor2 = self._create_flavor({'flavorid': 'f2', 'name': 'n2'})
+ values = [
+ (flavor1['flavorid'], 'p1'),
+ (flavor1['flavorid'], 'p2'),
+ (flavor2['flavorid'], 'p3')
+ ]
+ for v in values:
+ self._create_flavor_access(*v)
+
+ db.flavor_destroy(self.ctxt, flavor1['name'])
+
+ p = (self.ctxt, flavor1['flavorid'])
+ self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
+ p = (self.ctxt, flavor2['flavorid'])
+ self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
+ db.flavor_destroy(self.ctxt, flavor2['name'])
+ self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
+
+
+class FixedIPTestCase(BaseInstanceTypeTestCase):
+ def _timeout_test(self, ctxt, timeout, multi_host):
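+ # Create one instance on host 'foo' and a network on host 'bar', then four
+ # fixed IPs of which only the first (unallocated, on the network and updated
+ # before the timeout) should be eligible for deallocation.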
+ instance = db.instance_create(ctxt, dict(host='foo'))
+ net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
+ host='bar'))
+ old = timeout - datetime.timedelta(seconds=5)
+ new = timeout + datetime.timedelta(seconds=5)
+ # should deallocate
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=net['id'],
+ updated_at=old))
+ # still allocated
+ db.fixed_ip_create(ctxt, dict(allocated=True,
+ instance_uuid=instance['uuid'],
+ network_id=net['id'],
+ updated_at=old))
+ # wrong network
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=None,
+ updated_at=old))
+ # too new
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=None,
+ updated_at=new))
+
+ def mock_db_query_first_to_raise_data_error_exception(self):
+ self.mox.StubOutWithMock(query.Query, 'first')
+ query.Query.first().AndRaise(db_exc.DBError())
+ self.mox.ReplayAll()
+
+ def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
+ now = timeutils.utcnow()
+ self._timeout_test(self.ctxt, now, False)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
+ self.assertEqual(result, 0)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
+ self.assertEqual(result, 1)
+
+ def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
+ now = timeutils.utcnow()
+ self._timeout_test(self.ctxt, now, True)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
+ self.assertEqual(result, 1)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
+ self.assertEqual(result, 0)
+
+ def test_fixed_ip_get_by_floating_address(self):
+ fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
+ values = {'address': '8.7.6.5',
+ 'fixed_ip_id': fixed_ip['id']}
+ floating = db.floating_ip_create(self.ctxt, values)['address']
+ fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
+ self._assertEqualObjects(fixed_ip, fixed_ip_ref)
+
+ def test_fixed_ip_get_by_host(self):
+ host_ips = {
+ 'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
+ 'host2': ['1.1.1.4', '1.1.1.5'],
+ 'host3': ['1.1.1.6']
+ }
+
+ for host, ips in host_ips.iteritems():
+ for ip in ips:
+ instance_uuid = self._create_instance(host=host)
+ db.fixed_ip_create(self.ctxt, {'address': ip})
+ db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
+
+ for host, ips in host_ips.iteritems():
+ ips_on_host = map(lambda x: x['address'],
+ db.fixed_ip_get_by_host(self.ctxt, host))
+ self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
+
+ def test_fixed_ip_get_by_network_host_not_found_exception(self):
+ self.assertRaises(
+ exception.FixedIpNotFoundForNetworkHost,
+ db.fixed_ip_get_by_network_host,
+ self.ctxt, 1, 'ignore')
+
+ def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
+ db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
+
+ fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
+
+ self.assertEqual(1, fip['network_id'])
+ self.assertEqual('host', fip['host'])
+
+ def _create_instance(self, **kwargs):
+ instance = db.instance_create(self.ctxt, kwargs)
+ return instance['uuid']
+
+ def test_fixed_ip_get_by_instance_fixed_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
+ [ips_list[0].address])
+
+ def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
+
+ another_instance = db.instance_create(self.ctxt, {})
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=another_instance['uuid'], address="192.168.1.7"))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ip_get_by_instance_not_found_exception(self):
+ instance_uuid = self._create_instance()
+
+ self.assertRaises(exception.FixedIpNotFoundForInstance,
+ db.fixed_ip_get_by_instance,
+ self.ctxt, instance_uuid)
+
+ def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
+ [ips_list[0].address])
+
+ def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
+
+ another_vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=another_vif.id, address="192.168.1.7"))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ips_by_virtual_interface_no_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self.assertEqual(0, len(ips_list))
+
+ def create_fixed_ip(self, **params):
+ default_params = {'address': '192.168.0.1'}
+ default_params.update(params)
+ return db.fixed_ip_create(self.ctxt, default_params)['address']
+
+ def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
+ instance_uuid = self._create_instance()
+ self.assertRaises(exception.FixedIpNotFoundForNetwork,
+ db.fixed_ip_associate,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_fails_if_ip_in_use(self):
+ instance_uuid = self._create_instance()
+
+ address = self.create_fixed_ip(instance_uuid=instance_uuid)
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ db.fixed_ip_associate,
+ self.ctxt, address, instance_uuid)
+
+ def test_fixed_ip_associate_succeeds(self):
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip(network_id=network['id'])
+ db.fixed_ip_associate(self.ctxt, address, instance_uuid,
+ network_id=network['id'])
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+
+ def test_fixed_ip_associate_succeeds_and_sets_network(self):
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip()
+ db.fixed_ip_associate(self.ctxt, address, instance_uuid,
+ network_id=network['id'])
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+ self.assertEqual(fixed_ip['network_id'], network['id'])
+
+ def test_fixed_ip_associate_pool_invalid_uuid(self):
+ instance_uuid = '123'
+ self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
+ instance_uuid = self._create_instance()
+ self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_pool_succeeds(self):
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip(network_id=network['id'])
+ db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+
+ def test_fixed_ip_create_same_address(self):
+ address = '192.168.1.5'
+ params = {'address': address}
+ db.fixed_ip_create(self.ctxt, params)
+ self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
+ self.ctxt, params)
+
+ def test_fixed_ip_create_success(self):
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
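+ # The most recently created action is expected first.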
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': '192.168.1.5',
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+
+ def test_fixed_ip_bulk_create_same_address(self):
+ address_1 = '192.168.1.5'
+ address_2 = '192.168.1.6'
+ instance_uuid = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ params = [
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_2, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': 'localhost', 'address': address_2, 'allocated': True,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_2,
+ 'virtual_interface_id': None},
+ ]
+
+ self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
+ self.ctxt, params)
+ # In this case the transaction will be rolled back and none of the ips
+ # will make it to the database.
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address, self.ctxt, address_1)
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address, self.ctxt, address_2)
+
+ def test_fixed_ip_bulk_create_success(self):
+ address_1 = '192.168.1.5'
+ address_2 = '192.168.1.6'
+
+ instance_uuid = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ params = [
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': 'localhost', 'address': address_2, 'allocated': True,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_2,
+ 'virtual_interface_id': None}
+ ]
+
+ db.fixed_ip_bulk_create(self.ctxt, params)
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
+ 'virtual_interface', 'network', 'floating_ips']
+ fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+
+ # we have no `id` in incoming data so we can not use
+ # _assertEqualListsOfObjects to compare incoming data and received
+ # objects
+ fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
+ params = sorted(params, key=lambda i: i['network_id'])
+ for param, ip in zip(params, fixed_ip_data):
+ self._assertEqualObjects(param, ip, ignored_keys)
+
+ def test_fixed_ip_disassociate(self):
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
+ vif = db.virtual_interface_create(self.ctxt, values)
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': vif['id']
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ db.fixed_ip_disassociate(self.ctxt, address)
+ fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
+ ignored_keys = ['created_at', 'id', 'deleted_at',
+ 'updated_at', 'instance_uuid',
+ 'virtual_interface_id']
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+ self.assertIsNone(fixed_ip_data['instance_uuid'])
+ self.assertIsNone(fixed_ip_data['virtual_interface_id'])
+
+ def test_fixed_ip_get_not_found_exception(self):
+ self.assertRaises(exception.FixedIpNotFound,
+ db.fixed_ip_get, self.ctxt, 0)
+
+ def test_fixed_ip_get_success2(self):
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ fixed_ip_id = db.fixed_ip_create(self.ctxt, param)['id']
+
+ self.ctxt.is_admin = False
+ self.assertRaises(exception.Forbidden, db.fixed_ip_get,
+ self.ctxt, fixed_ip_id)
+
+ def test_fixed_ip_get_success(self):
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
+ fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+
+ def test_fixed_ip_get_by_address(self):
+ instance_uuid = self._create_instance()
+ db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
+ 'instance_uuid': instance_uuid,
+ })
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
+ columns_to_join=['instance'])
+ self.assertIn('instance', fixed_ip.__dict__)
+ self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
+
+ def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address_detailed, self.ctxt,
+ '192.168.1.5')
+
+ def test_fixed_ip_get_by_address_with_data_error_exception(self):
+ self.mock_db_query_first_to_raise_data_error_exception()
+ self.assertRaises(exception.FixedIpInvalid,
+ db.fixed_ip_get_by_address_detailed, self.ctxt,
+ '192.168.1.6')
+
+ def test_fixed_ip_get_by_address_detailed_success(self):
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt, address)
+ # fixed ip check here
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
+
+ # network model check here
+ network_data = db.network_get(self.ctxt, network_id)
+ self._assertEqualObjects(network_data, fixed_ip_data[1])
+
+ # Instance check here
+ instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
+ ignored_keys = ['info_cache', 'system_metadata',
+ 'security_groups', 'metadata',
+ 'pci_devices'] # HOW ????
+ self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)
+
+ def test_fixed_ip_update_not_found_for_address(self):
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_update, self.ctxt,
+ '192.168.1.5', {})
+
+ def test_fixed_ip_update(self):
+ instance_uuid_1 = self._create_instance()
+ instance_uuid_2 = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ param_1 = {
+ 'reserved': True, 'deleted': 0, 'leased': True,
+ 'host': '192.168.133.1', 'address': '10.0.0.2',
+ 'allocated': True, 'instance_uuid': instance_uuid_1,
+ 'network_id': network_id_1, 'virtual_interface_id': '123',
+ }
+
+ param_2 = {
+ 'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
+ 'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
+ 'virtual_interface_id': None
+ }
+
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
+ db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
+ fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
+ param_2['address'])
+ self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
+
+
+class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(FloatingIpTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'address': '1.1.1.1',
+ 'fixed_ip_id': None,
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'auto_assigned': False,
+ 'pool': 'fake_pool',
+ 'interface': 'fake_interface',
+ }
+
+ def mock_db_query_first_to_raise_data_error_exception(self):
+ self.mox.StubOutWithMock(query.Query, 'first')
+ query.Query.first().AndRaise(db_exc.DBError())
+ self.mox.ReplayAll()
+
+ def _create_floating_ip(self, values):
+ if not values:
+ values = {}
+ vals = self._get_base_values()
+ vals.update(values)
+ return db.floating_ip_create(self.ctxt, vals)
+
+ def test_floating_ip_get(self):
+ values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
+ floating_ips = [self._create_floating_ip(val) for val in values]
+
+ for floating_ip in floating_ips:
+ real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
+ self._assertEqualObjects(floating_ip, real_floating_ip,
+ ignored_keys=['fixed_ip'])
+
+ def test_floating_ip_get_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFound,
+ db.floating_ip_get, self.ctxt, 100500)
+
+ def test_floating_ip_get_with_long_id_not_found(self):
+ self.mock_db_query_first_to_raise_data_error_exception()
+ self.assertRaises(exception.InvalidID,
+ db.floating_ip_get, self.ctxt, 123456789101112)
+
+ def test_floating_ip_get_pools(self):
+ values = [
+ {'address': '0.0.0.0', 'pool': 'abc'},
+ {'address': '1.1.1.1', 'pool': 'abc'},
+ {'address': '2.2.2.2', 'pool': 'def'},
+ {'address': '3.3.3.3', 'pool': 'ghi'},
+ ]
+ for val in values:
+ self._create_floating_ip(val)
+ expected_pools = [{'name': x}
+ for x in set(map(lambda x: x['pool'], values))]
+ real_pools = db.floating_ip_get_pools(self.ctxt)
+ self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
+
+ def test_floating_ip_allocate_address(self):
+ pools = {
+ 'pool1': ['0.0.0.0', '1.1.1.1'],
+ 'pool2': ['2.2.2.2'],
+ 'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
+ }
+ for pool, addresses in pools.iteritems():
+ for address in addresses:
+ vals = {'pool': pool, 'address': address, 'project_id': None}
+ self._create_floating_ip(vals)
+
+ project_id = self._get_base_values()['project_id']
+ for pool, addresses in pools.iteritems():
+ alloc_addrs = []
+ for i in addresses:
+ float_addr = db.floating_ip_allocate_address(self.ctxt,
+ project_id, pool)
+ alloc_addrs.append(float_addr)
+ self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
+
+ def test_floating_ip_allocate_auto_assigned(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
+
+ float_ips = []
+ for i in range(0, 2):
+ float_ips.append(self._create_floating_ip(
+ {"address": addresses[i]}))
+ for i in range(2, 4):
+ float_ips.append(self._create_floating_ip({"address": addresses[i],
+ "auto_assigned": True}))
+
+ for i in range(0, 2):
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
+ self.assertFalse(float_ip.auto_assigned)
+ for i in range(2, 4):
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
+ self.assertTrue(float_ip.auto_assigned)
+
+ def test_floating_ip_allocate_address_no_more_floating_ips(self):
+ self.assertRaises(exception.NoMoreFloatingIps,
+ db.floating_ip_allocate_address,
+ self.ctxt, 'any_project_id', 'no_such_pool')
+
+ def test_floating_ip_allocate_not_authorized(self):
+ ctxt = context.RequestContext(user_id='a', project_id='abc',
+ is_admin=False)
+ self.assertRaises(exception.Forbidden,
+ db.floating_ip_allocate_address,
+ ctxt, 'other_project_id', 'any_pool')
+
+ def _get_existing_ips(self):
+ return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
+
+ def test_floating_ip_bulk_create(self):
+ expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
+ db.floating_ip_bulk_create(self.ctxt,
+ map(lambda x: {'address': x}, expected_ips))
+ self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
+ expected_ips)
+
+ def test_floating_ip_bulk_create_duplicate(self):
+ ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
+ prepare_ips = lambda x: {'address': x}
+
+ result = db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
+ self.assertEqual('1.1.1.1', result[0].address)
+ self.assertRaises(exception.FloatingIpExists,
+ db.floating_ip_bulk_create,
+ self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_get_by_address,
+ self.ctxt, '1.1.1.5')
+
+ def test_floating_ip_bulk_destroy(self):
+ ips_for_delete = []
+ ips_for_non_delete = []
+
+ def create_ips(i, j):
+ return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
+
+ # NOTE(boris-42): Create more than 256 ip to check that
+ # _ip_range_splitter works properly.
+ for i in range(1, 3):
+ ips_for_delete.extend(create_ips(i, 255))
+ ips_for_non_delete.extend(create_ips(3, 255))
+
+ db.floating_ip_bulk_create(self.ctxt,
+ ips_for_delete + ips_for_non_delete)
+
+ non_bulk_ips_for_delete = create_ips(4, 3)
+ non_bulk_ips_for_non_delete = create_ips(5, 3)
+ non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
+ project_id = 'fake_project'
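+ # Reserve and commit quota for the six individually created IPs so the test
+ # can verify that bulk destroy decrements the project's floating IP usage.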
+ reservations = quota.QUOTAS.reserve(self.ctxt,
+ floating_ips=len(non_bulk_ips),
+ project_id=project_id)
+ for dct in non_bulk_ips:
+ self._create_floating_ip(dct)
+ quota.QUOTAS.commit(self.ctxt, reservations, project_id=project_id)
+ self.assertEqual(db.quota_usage_get_all_by_project(
+ self.ctxt, project_id),
+ {'project_id': project_id,
+ 'floating_ips': {'in_use': 6, 'reserved': 0}})
+ ips_for_delete.extend(non_bulk_ips_for_delete)
+ ips_for_non_delete.extend(non_bulk_ips_for_non_delete)
+
+ db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
+
+ expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
+ self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
+ expected_addresses)
+ self.assertEqual(db.quota_usage_get_all_by_project(
+ self.ctxt, project_id),
+ {'project_id': project_id,
+ 'floating_ips': {'in_use': 3, 'reserved': 0}})
+
+ def test_floating_ip_create(self):
+ floating_ip = self._create_floating_ip({})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+
+ self.assertIsNotNone(floating_ip['id'])
+ self._assertEqualObjects(floating_ip, self._get_base_values(),
+ ignored_keys)
+
+ def test_floating_ip_create_duplicate(self):
+ self._create_floating_ip({})
+ self.assertRaises(exception.FloatingIpExists,
+ self._create_floating_ip, {})
+
+ def _create_fixed_ip(self, params):
+ default_params = {'address': '192.168.0.1'}
+ default_params.update(params)
+ return db.fixed_ip_create(self.ctxt, default_params)['address']
+
+ def test_floating_ip_fixed_ip_associate(self):
+ float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
+
+ float_ips = [self._create_floating_ip({'address': address})
+ for address in float_addresses]
+ fixed_addrs = [self._create_fixed_ip({'address': address})
+ for address in fixed_addresses]
+
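+ # Associate each floating IP with a fixed IP and check both the
+ # returned fixed IP and the updated floating IP row.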
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_ip.address,
+ fixed_addr, 'host')
+ self.assertEqual(fixed_ip.address, fixed_addr)
+
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
+ self.assertEqual('host', updated_float_ip.host)
+
+ # Test that associating an already associated float_ip returns None
+ result = db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_addresses[0],
+ fixed_addresses[0], 'host')
+ self.assertIsNone(result)
+
+ def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_fixed_ip_associate,
+ self.ctxt, '10.10.10.10', 'some', 'some')
+
+ def test_floating_ip_deallocate(self):
+ values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
+ float_ip = self._create_floating_ip(values)
+ rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
+ self.assertEqual(1, rows_updated)
+
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertIsNone(updated_float_ip.project_id)
+ self.assertIsNone(updated_float_ip.host)
+ self.assertFalse(updated_float_ip.auto_assigned)
+
+ def test_floating_ip_deallocate_address_not_found(self):
+ self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
+
+ def test_floating_ip_destroy(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+
+ expected_len = len(addresses)
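+ # Destroy the IPs one by one; once the last one is gone,
+ # floating_ip_get_all raises instead of returning an empty list.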
+ for float_ip in float_ips:
+ db.floating_ip_destroy(self.ctxt, float_ip.address)
+ self.assertRaises(exception.FloatingIpNotFound,
+ db.floating_ip_get, self.ctxt, float_ip.id)
+ expected_len -= 1
+ if expected_len > 0:
+ self.assertEqual(expected_len,
+ len(db.floating_ip_get_all(self.ctxt)))
+ else:
+ self.assertRaises(exception.NoFloatingIpsDefined,
+ db.floating_ip_get_all, self.ctxt)
+
+ def test_floating_ip_disassociate(self):
+ float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
+
+ float_ips = [self._create_floating_ip({'address': address})
+ for address in float_addresses]
+ fixed_addrs = [self._create_fixed_ip({'address': address})
+ for address in fixed_addresses]
+
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_ip.address,
+ fixed_addr, 'host')
+
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
+ self.assertEqual(fixed.address, fixed_addr)
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertIsNone(updated_float_ip.fixed_ip_id)
+ self.assertIsNone(updated_float_ip.host)
+
+ def test_floating_ip_disassociate_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_disassociate, self.ctxt,
+ '11.11.11.11')
+
+ def test_floating_ip_set_auto_assigned(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr,
+ 'auto_assigned': False})
+ for addr in addresses]
+
+ for i in range(2):
+ db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
+ for i in range(2):
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
+ self.assertTrue(float_ip.auto_assigned)
+
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
+ self.assertFalse(float_ip.auto_assigned)
+
+ def test_floating_ip_get_all(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+ self._assertEqualListsOfObjects(float_ips,
+ db.floating_ip_get_all(self.ctxt))
+
+ def test_floating_ip_get_all_not_found(self):
+ self.assertRaises(exception.NoFloatingIpsDefined,
+ db.floating_ip_get_all, self.ctxt)
+
+ def test_floating_ip_get_all_by_host(self):
+ hosts = {
+ 'host1': ['1.1.1.1', '1.1.1.2'],
+ 'host2': ['2.1.1.1', '2.1.1.2'],
+ 'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
+ }
+
+ hosts_with_float_ips = {}
+ for host, addresses in hosts.iteritems():
+ hosts_with_float_ips[host] = []
+ for address in addresses:
+ float_ip = self._create_floating_ip({'host': host,
+ 'address': address})
+ hosts_with_float_ips[host].append(float_ip)
+
+ for host, float_ips in hosts_with_float_ips.iteritems():
+ real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
+ self._assertEqualListsOfObjects(float_ips, real_float_ips)
+
+ def test_floating_ip_get_all_by_host_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForHost,
+ db.floating_ip_get_all_by_host,
+ self.ctxt, 'non_exists_host')
+
+ def test_floating_ip_get_all_by_project(self):
+ projects = {
+ 'pr1': ['1.1.1.1', '1.1.1.2'],
+ 'pr2': ['2.1.1.1', '2.1.1.2'],
+ 'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
+ }
+
+ projects_with_float_ips = {}
+ for project_id, addresses in projects.iteritems():
+ projects_with_float_ips[project_id] = []
+ for address in addresses:
+ float_ip = self._create_floating_ip({'project_id': project_id,
+ 'address': address})
+ projects_with_float_ips[project_id].append(float_ip)
+
+ for project_id, float_ips in projects_with_float_ips.iteritems():
+ real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
+ project_id)
+ self._assertEqualListsOfObjects(float_ips, real_float_ips,
+ ignored_keys='fixed_ip')
+
+ def test_floating_ip_get_all_by_project_not_authorized(self):
+ ctxt = context.RequestContext(user_id='a', project_id='abc',
+ is_admin=False)
+ self.assertRaises(exception.Forbidden,
+ db.floating_ip_get_all_by_project,
+ ctxt, 'other_project')
+
+ def test_floating_ip_get_by_address(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+
+ for float_ip in float_ips:
+ real_float_ip = db.floating_ip_get_by_address(self.ctxt,
+ float_ip.address)
+ self._assertEqualObjects(float_ip, real_float_ip,
+ ignored_keys='fixed_ip')
+
+ def test_floating_ip_get_by_address_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_get_by_address,
+ self.ctxt, '20.20.20.20')
+
+ def test_floating_ip_get_by_invalid_address(self):
+ self.mock_db_query_first_to_raise_data_error_exception()
+ self.assertRaises(exception.InvalidIpAddressError,
+ db.floating_ip_get_by_address,
+ self.ctxt, 'non_exists_host')
+
+ def test_floating_ip_get_by_fixed_address(self):
+ fixed_float = [
+ ('1.1.1.1', '2.2.2.1'),
+ ('1.1.1.2', '2.2.2.2'),
+ ('1.1.1.3', '2.2.2.3')
+ ]
+
+ for fixed_addr, float_addr in fixed_float:
+ self._create_floating_ip({'address': float_addr})
+ self._create_fixed_ip({'address': fixed_addr})
+ db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
+ fixed_addr, 'some_host')
+
+ for fixed_addr, float_addr in fixed_float:
+ float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
+ fixed_addr)
+ self.assertEqual(float_addr, float_ip[0]['address'])
+
+ def test_floating_ip_get_by_fixed_ip_id(self):
+ fixed_float = [
+ ('1.1.1.1', '2.2.2.1'),
+ ('1.1.1.2', '2.2.2.2'),
+ ('1.1.1.3', '2.2.2.3')
+ ]
+
+ for fixed_addr, float_addr in fixed_float:
+ self._create_floating_ip({'address': float_addr})
+ self._create_fixed_ip({'address': fixed_addr})
+ db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
+ fixed_addr, 'some_host')
+
+ for fixed_addr, float_addr in fixed_float:
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
+ float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
+ fixed_ip['id'])
+ self.assertEqual(float_addr, float_ip[0]['address'])
+
+ def test_floating_ip_update(self):
+ float_ip = self._create_floating_ip({})
+
+ values = {
+ 'project_id': 'some_pr',
+ 'host': 'some_host',
+ 'auto_assigned': True,
+ 'interface': 'some_interface',
+ 'pool': 'some_pool'
+ }
+ floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
+ values)
+ self.assertIsNotNone(floating_ref)
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
+ self._assertEqualObjects(updated_float_ip, values,
+ ignored_keys=['id', 'address', 'updated_at',
+ 'deleted_at', 'created_at',
+ 'deleted', 'fixed_ip_id',
+ 'fixed_ip'])
+
+ def test_floating_ip_update_to_duplicate(self):
+ float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
+ float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
+
+ self.assertRaises(exception.FloatingIpExists,
+ db.floating_ip_update,
+ self.ctxt, float_ip2['address'],
+ {'address': float_ip1['address']})
+
+
+class InstanceDestroyConstraints(test.TestCase):
+
+ def test_destroy_with_equal_any_constraint_met_single_value(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'task_state': 'deleting'})
+ constraint = db.constraint(task_state=db.equal_any('deleting'))
+ db.instance_destroy(ctx, instance['uuid'], constraint)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ ctx, instance['uuid'])
+
+ def test_destroy_with_equal_any_constraint_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'task_state': 'deleting'})
+ constraint = db.constraint(task_state=db.equal_any('deleting',
+ 'error'))
+ db.instance_destroy(ctx, instance['uuid'], constraint)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ ctx, instance['uuid'])
+
+ def test_destroy_with_equal_any_constraint_not_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'vm_state': 'resize'})
+ constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
+ self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
+ ctx, instance['uuid'], constraint)
+ instance = db.instance_get_by_uuid(ctx, instance['uuid'])
+ self.assertFalse(instance['deleted'])
+
+ def test_destroy_with_not_equal_constraint_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'task_state': 'deleting'})
+ constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
+ db.instance_destroy(ctx, instance['uuid'], constraint)
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ ctx, instance['uuid'])
+
+ def test_destroy_with_not_equal_constraint_not_met(self):
+ ctx = context.get_admin_context()
+ instance = db.instance_create(ctx, {'vm_state': 'active'})
+ constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
+ self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
+ ctx, instance['uuid'], constraint)
+ instance = db.instance_get_by_uuid(ctx, instance['uuid'])
+ self.assertFalse(instance['deleted'])
+
+
+class VolumeUsageDBApiTestCase(test.TestCase):
+
+ def setUp(self):
+ super(VolumeUsageDBApiTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ self.useFixture(test.TimeOverride())
+
+ def test_vol_usage_update_no_totals_update(self):
+ ctxt = context.get_admin_context()
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ start_time = now - datetime.timedelta(seconds=10)
+
+ expected_vol_usages = {
+ u'1': {'volume_id': u'1',
+ 'instance_uuid': 'fake-instance-uuid1',
+ 'project_id': 'fake-project-uuid1',
+ 'user_id': 'fake-user-uuid1',
+ 'curr_reads': 1000,
+ 'curr_read_bytes': 2000,
+ 'curr_writes': 3000,
+ 'curr_write_bytes': 4000,
+ 'curr_last_refreshed': now,
+ 'tot_reads': 0,
+ 'tot_read_bytes': 0,
+ 'tot_writes': 0,
+ 'tot_write_bytes': 0,
+ 'tot_last_refreshed': None},
+ u'2': {'volume_id': u'2',
+ 'instance_uuid': 'fake-instance-uuid2',
+ 'project_id': 'fake-project-uuid2',
+ 'user_id': 'fake-user-uuid2',
+ 'curr_reads': 100,
+ 'curr_read_bytes': 200,
+ 'curr_writes': 300,
+ 'curr_write_bytes': 400,
+ 'tot_reads': 0,
+ 'tot_read_bytes': 0,
+ 'tot_writes': 0,
+ 'tot_write_bytes': 0,
+ 'tot_last_refreshed': None}
+ }
+
+ def _compare(vol_usage, expected):
+ for key, value in expected.items():
+ self.assertEqual(vol_usage[key], value)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 0)
+
+ db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
+ wr_req=30, wr_bytes=40,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ user_id='fake-user-uuid1',
+ availability_zone='fake-az')
+ db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid2',
+ project_id='fake-project-uuid2',
+ user_id='fake-user-uuid2',
+ availability_zone='fake-az')
+ db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
+ wr_req=3000, wr_bytes=4000,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ user_id='fake-user-uuid1',
+ availability_zone='fake-az')
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 2)
+ for usage in vol_usages:
+ _compare(usage, expected_vol_usages[usage.volume_id])
+
+ def test_vol_usage_update_totals_update(self):
+ ctxt = context.get_admin_context()
+ now = datetime.datetime(1, 1, 1, 1, 0, 0)
+ start_time = now - datetime.timedelta(seconds=10)
+ now1 = now + datetime.timedelta(minutes=1)
+ now2 = now + datetime.timedelta(minutes=2)
+ now3 = now + datetime.timedelta(minutes=3)
+
+ timeutils.set_time_override(now)
+ db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az')
+ current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
+ self.assertEqual(current_usage['tot_reads'], 0)
+ self.assertEqual(current_usage['curr_reads'], 100)
+
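+ # update_totals=True rolls the current counters into the tot_*
+ # columns and resets the curr_* counters to zero.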
+ timeutils.set_time_override(now1)
+ db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
+ wr_req=400, wr_bytes=500,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az',
+ update_totals=True)
+ current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
+ self.assertEqual(current_usage['tot_reads'], 200)
+ self.assertEqual(current_usage['curr_reads'], 0)
+
+ timeutils.set_time_override(now2)
+ db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
+ wr_req=500, wr_bytes=600,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid')
+ current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
+ self.assertEqual(current_usage['tot_reads'], 200)
+ self.assertEqual(current_usage['curr_reads'], 300)
+
+ timeutils.set_time_override(now3)
+ db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
+ wr_req=600, wr_bytes=700,
+ instance_id='fake-instance-uuid',
+ project_id='fake-project-uuid',
+ user_id='fake-user-uuid',
+ availability_zone='fake-az',
+ update_totals=True)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+
+ expected_vol_usages = {'volume_id': u'1',
+ 'project_id': 'fake-project-uuid',
+ 'user_id': 'fake-user-uuid',
+ 'instance_uuid': 'fake-instance-uuid',
+ 'availability_zone': 'fake-az',
+ 'tot_reads': 600,
+ 'tot_read_bytes': 800,
+ 'tot_writes': 1000,
+ 'tot_write_bytes': 1200,
+ 'tot_last_refreshed': now3,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0,
+ 'curr_last_refreshed': now2}
+
+ self.assertEqual(1, len(vol_usages))
+ for key, value in expected_vol_usages.items():
+ self.assertEqual(vol_usages[0][key], value, key)
+
+ def test_vol_usage_update_when_blockdevicestats_reset(self):
+ ctxt = context.get_admin_context()
+ now = timeutils.utcnow()
+ start_time = now - datetime.timedelta(seconds=10)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 0)
+
+ db.vol_usage_update(ctxt, u'1',
+ rd_req=10000, rd_bytes=20000,
+ wr_req=30000, wr_bytes=40000,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid1')
+
+ # Instance rebooted or crashed: block device stats were reset and are
+ # now lower than the previous values.
+ db.vol_usage_update(ctxt, u'1',
+ rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid1')
+
+ db.vol_usage_update(ctxt, u'1',
+ rd_req=200, rd_bytes=300,
+ wr_req=400, wr_bytes=500,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid1')
+
+ vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
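+ # The pre-reset counters were folded into tot_*, while curr_* now
+ # tracks the latest reported values.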
+ expected_vol_usage = {'volume_id': u'1',
+ 'instance_uuid': 'fake-instance-uuid1',
+ 'project_id': 'fake-project-uuid1',
+ 'availability_zone': 'fake-az',
+ 'user_id': 'fake-user-uuid1',
+ 'curr_reads': 200,
+ 'curr_read_bytes': 300,
+ 'curr_writes': 400,
+ 'curr_write_bytes': 500,
+ 'tot_reads': 10000,
+ 'tot_read_bytes': 20000,
+ 'tot_writes': 30000,
+ 'tot_write_bytes': 40000}
+ for key, value in expected_vol_usage.items():
+ self.assertEqual(vol_usage[key], value, key)
+
+ def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
+ # This is unlikely to happen, but could occur when a volume is detached
+ # right after an instance has rebooted / recovered and before
+ # the system has polled and updated the volume usage cache table.
+ ctxt = context.get_admin_context()
+ now = timeutils.utcnow()
+ start_time = now - datetime.timedelta(seconds=10)
+
+ vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+ self.assertEqual(len(vol_usages), 0)
+
+ db.vol_usage_update(ctxt, u'1',
+ rd_req=10000, rd_bytes=20000,
+ wr_req=30000, wr_bytes=40000,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid1')
+
+ # Instance rebooted or crashed: block device stats were reset and are
+ # now lower than the previous values.
+ db.vol_usage_update(ctxt, u'1',
+ rd_req=100, rd_bytes=200,
+ wr_req=300, wr_bytes=400,
+ instance_id='fake-instance-uuid1',
+ project_id='fake-project-uuid1',
+ availability_zone='fake-az',
+ user_id='fake-user-uuid1',
+ update_totals=True)
+
+ vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
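+ # Both the pre-reset counters and the new counters end up in tot_*,
+ # leaving curr_* at zero because totals were updated.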
+ expected_vol_usage = {'volume_id': u'1',
+ 'instance_uuid': 'fake-instance-uuid1',
+ 'project_id': 'fake-project-uuid1',
+ 'availability_zone': 'fake-az',
+ 'user_id': 'fake-user-uuid1',
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0,
+ 'tot_reads': 10100,
+ 'tot_read_bytes': 20200,
+ 'tot_writes': 30300,
+ 'tot_write_bytes': 40400}
+ for key, value in expected_vol_usage.items():
+ self.assertEqual(vol_usage[key], value, key)
+
+
+class TaskLogTestCase(test.TestCase):
+
+ def setUp(self):
+ super(TaskLogTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ now = timeutils.utcnow()
+ self.begin = now - datetime.timedelta(seconds=10)
+ self.end = now - datetime.timedelta(seconds=5)
+ self.task_name = 'fake-task-name'
+ self.host = 'fake-host'
+ self.message = 'Fake task message'
+ db.task_log_begin_task(self.context, self.task_name, self.begin,
+ self.end, self.host, message=self.message)
+
+ def test_task_log_get(self):
+ result = db.task_log_get(self.context, self.task_name, self.begin,
+ self.end, self.host)
+ self.assertEqual(result['task_name'], self.task_name)
+ self.assertEqual(result['period_beginning'], self.begin)
+ self.assertEqual(result['period_ending'], self.end)
+ self.assertEqual(result['host'], self.host)
+ self.assertEqual(result['message'], self.message)
+
+ def test_task_log_get_all(self):
+ result = db.task_log_get_all(self.context, self.task_name, self.begin,
+ self.end, host=self.host)
+ self.assertEqual(len(result), 1)
+ result = db.task_log_get_all(self.context, self.task_name, self.begin,
+ self.end, host=self.host, state='')
+ self.assertEqual(len(result), 0)
+
+ def test_task_log_begin_task(self):
+ db.task_log_begin_task(self.context, 'fake', self.begin,
+ self.end, self.host, task_items=42,
+ message=self.message)
+ result = db.task_log_get(self.context, 'fake', self.begin,
+ self.end, self.host)
+ self.assertEqual(result['task_name'], 'fake')
+
+ def test_task_log_begin_task_duplicate(self):
+ params = (self.context, 'fake', self.begin, self.end, self.host)
+ db.task_log_begin_task(*params, message=self.message)
+ self.assertRaises(exception.TaskAlreadyRunning,
+ db.task_log_begin_task,
+ *params, message=self.message)
+
+ def test_task_log_end_task(self):
+ errors = 1
+ db.task_log_end_task(self.context, self.task_name, self.begin,
+ self.end, self.host, errors, message=self.message)
+ result = db.task_log_get(self.context, self.task_name, self.begin,
+ self.end, self.host)
+ self.assertEqual(result['errors'], 1)
+
+ def test_task_log_end_task_task_not_running(self):
+ self.assertRaises(exception.TaskNotRunning,
+ db.task_log_end_task, self.context, 'nonexistent',
+ self.begin, self.end, self.host, 42,
+ message=self.message)
+
+
+class BlockDeviceMappingTestCase(test.TestCase):
+ def setUp(self):
+ super(BlockDeviceMappingTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.instance = db.instance_create(self.ctxt, {})
+
+ def _create_bdm(self, values):
+ values.setdefault('instance_uuid', self.instance['uuid'])
+ values.setdefault('device_name', 'fake_device')
+ values.setdefault('source_type', 'volume')
+ values.setdefault('destination_type', 'volume')
+ block_dev = block_device.BlockDeviceDict(values)
+ db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
+ uuid = block_dev['instance_uuid']
+
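+ # The create return value is ignored; fetch the BDM back by its
+ # device_name so callers get the stored row.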
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+
+ for bdm in bdms:
+ if bdm['device_name'] == values['device_name']:
+ return bdm
+
+ def test_scrub_empty_str_values_no_effect(self):
+ values = {'volume_size': 5}
+ expected = copy.copy(values)
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, expected)
+
+ def test_scrub_empty_str_values_empty_string(self):
+ values = {'volume_size': ''}
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, {})
+
+ def test_scrub_empty_str_values_empty_unicode(self):
+ values = {'volume_size': u''}
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, {})
+
+ def test_block_device_mapping_create(self):
+ bdm = self._create_bdm({})
+ self.assertIsNotNone(bdm)
+
+ def test_block_device_mapping_update(self):
+ bdm = self._create_bdm({})
+ result = db.block_device_mapping_update(
+ self.ctxt, bdm['id'], {'destination_type': 'moon'},
+ legacy=False)
+ uuid = bdm['instance_uuid']
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(bdm_real[0]['destination_type'], 'moon')
+ # Also make sure the update call returned correct data
+ self.assertEqual(dict(bdm_real[0].iteritems()),
+ dict(result.iteritems()))
+
+ def test_block_device_mapping_update_or_create(self):
+ values = {
+ 'instance_uuid': self.instance['uuid'],
+ 'device_name': 'fake_name',
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }
+ # check create
+ db.block_device_mapping_update_or_create(self.ctxt, values,
+ legacy=False)
+ uuid = values['instance_uuid']
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
+
+ # check update
+ values['destination_type'] = 'camelot'
+ db.block_device_mapping_update_or_create(self.ctxt, values,
+ legacy=False)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'fake_name')
+ self.assertEqual(bdm_real['destination_type'], 'camelot')
+
+ # check create without device_name
+ bdm1 = dict(values)
+ bdm1['device_name'] = None
+ db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 2)
+ bdm_real = bdm_real[1]
+ self.assertIsNone(bdm_real['device_name'])
+
+ # check create multiple devices without device_name
+ bdm2 = dict(values)
+ bdm2['device_name'] = None
+ db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 3)
+ bdm_real = bdm_real[2]
+ self.assertIsNone(bdm_real['device_name'])
+
+ def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
+ uuid = self.instance['uuid']
+ values = {
+ 'instance_uuid': uuid,
+ 'source_type': 'blank',
+ 'guest_format': 'myformat',
+ }
+
+ bdm1 = dict(values)
+ bdm1['device_name'] = '/dev/sdb'
+ db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
+
+ bdm2 = dict(values)
+ bdm2['device_name'] = '/dev/sdc'
+ db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
+
+ bdm_real = sorted(
+ db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
+ key=lambda bdm: bdm['device_name']
+ )
+
+ self.assertEqual(len(bdm_real), 2)
+ for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
+ self.assertEqual(bdm['device_name'], device_name)
+ self.assertEqual(bdm['guest_format'], 'myformat')
+
+ def test_block_device_mapping_update_or_create_check_remove_virt(self):
+ uuid = self.instance['uuid']
+ values = {
+ 'instance_uuid': uuid,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ }
+
+ # check that old swap bdms are deleted on create
+ val1 = dict(values)
+ val1['device_name'] = 'device1'
+ db.block_device_mapping_create(self.ctxt, val1, legacy=False)
+ val2 = dict(values)
+ val2['device_name'] = 'device2'
+ db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'device2')
+ self.assertEqual(bdm_real['source_type'], 'blank')
+ self.assertEqual(bdm_real['guest_format'], 'swap')
+ db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
+
+ def test_block_device_mapping_get_all_by_instance(self):
+ uuid1 = self.instance['uuid']
+ uuid2 = db.instance_create(self.ctxt, {})['uuid']
+
+ bdms_values = [{'instance_uuid': uuid1,
+ 'device_name': '/dev/vda'},
+ {'instance_uuid': uuid2,
+ 'device_name': '/dev/vdb'},
+ {'instance_uuid': uuid2,
+ 'device_name': '/dev/vdc'}]
+
+ for bdm in bdms_values:
+ self._create_bdm(bdm)
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], '/dev/vda')
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
+ self.assertEqual(len(bdms), 2)
+
+ def test_block_device_mapping_destroy(self):
+ bdm = self._create_bdm({})
+ db.block_device_mapping_destroy(self.ctxt, bdm['id'])
+ bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
+ bdm['instance_uuid'])
+ self.assertEqual(len(bdm), 0)
+
+ def test_block_device_mapping_destroy_by_instance_and_volume(self):
+ vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
+ vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
+
+ self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
+ self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
+
+ uuid = self.instance['uuid']
+ db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
+ vol_id1)
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
+
+ def test_block_device_mapping_destroy_by_instance_and_device(self):
+ self._create_bdm({'device_name': '/dev/vda'})
+ self._create_bdm({'device_name': '/dev/vdb'})
+
+ uuid = self.instance['uuid']
+ params = (self.ctxt, uuid, '/dev/vdb')
+ db.block_device_mapping_destroy_by_instance_and_device(*params)
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], '/dev/vda')
+
+ def test_block_device_mapping_get_by_volume_id(self):
+ self._create_bdm({'volume_id': 'fake_id'})
+ bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
+ self.assertEqual(bdm['volume_id'], 'fake_id')
+
+ def test_block_device_mapping_get_by_volume_id_join_instance(self):
+ self._create_bdm({'volume_id': 'fake_id'})
+ bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
+ ['instance'])
+ self.assertEqual(bdm['volume_id'], 'fake_id')
+ self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
+
+
+class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.agent_build_* methods."""
+
+ def setUp(self):
+ super(AgentBuildTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def test_agent_build_create_and_get_all(self):
+ self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
+ agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
+ all_agent_builds = db.agent_build_get_all(self.ctxt)
+ self.assertEqual(1, len(all_agent_builds))
+ self._assertEqualObjects(agent_build, all_agent_builds[0])
+
+ def test_agent_build_get_by_triple(self):
+ agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
+ 'os': 'FreeBSD', 'architecture': arch.X86_64})
+ self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
+ 'FreeBSD', 'i386'))
+ self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
+ self.ctxt, 'kvm', 'FreeBSD', arch.X86_64))
+
+ def test_agent_build_destroy(self):
+ agent_build = db.agent_build_create(self.ctxt, {})
+ self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
+ db.agent_build_destroy(self.ctxt, agent_build.id)
+ self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
+
+ def test_agent_build_update(self):
+ agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
+ db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
+ self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
+
+ def test_agent_build_destroy_destroyed(self):
+ agent_build = db.agent_build_create(self.ctxt, {})
+ db.agent_build_destroy(self.ctxt, agent_build.id)
+ self.assertRaises(exception.AgentBuildNotFound,
+ db.agent_build_destroy, self.ctxt, agent_build.id)
+
+ def test_agent_build_update_destroyed(self):
+ agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
+ db.agent_build_destroy(self.ctxt, agent_build.id)
+ self.assertRaises(exception.AgentBuildNotFound,
+ db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
+
+ def test_agent_build_exists(self):
+ values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
+ 'architecture': arch.X86_64}
+ db.agent_build_create(self.ctxt, values)
+ self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
+ self.ctxt, values)
+
+ def test_agent_build_get_all_by_hypervisor(self):
+ values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
+ 'architecture': arch.X86_64}
+ created = db.agent_build_create(self.ctxt, values)
+ actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
+ self._assertEqualListsOfObjects([created], actual)
+
+
+class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(VirtualInterfaceTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
+ values = {'host': 'localhost', 'project_id': 'project1'}
+ self.network = db.network_create_safe(self.ctxt, values)
+
+ def _get_base_values(self):
+ return {
+ 'instance_uuid': self.instance_uuid,
+ 'address': 'fake_address',
+ 'network_id': self.network['id'],
+ 'uuid': str(stdlib_uuid.uuid4())
+ }
+
+ def mock_db_query_first_to_raise_data_error_exception(self):
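+ # Stub out Query.first() via mox so it raises DBError, letting tests
+ # verify that callers translate it into InvalidIpAddressError.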
+ self.mox.StubOutWithMock(query.Query, 'first')
+ query.Query.first().AndRaise(db_exc.DBError())
+ self.mox.ReplayAll()
+
+ def _create_virt_interface(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.virtual_interface_create(self.ctxt, v)
+
+ def test_virtual_interface_create(self):
+ vif = self._create_virt_interface({})
+ self.assertIsNotNone(vif['id'])
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at', 'uuid']
+ self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
+
+ def test_virtual_interface_create_with_duplicate_address(self):
+ vif = self._create_virt_interface({})
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._create_virt_interface, {"uuid": vif['uuid']})
+
+ def test_virtual_interface_get(self):
+ vifs = [self._create_virt_interface({'address': 'a'}),
+ self._create_virt_interface({'address': 'b'})]
+
+ for vif in vifs:
+ real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
+ self._assertEqualObjects(vif, real_vif)
+
+ def test_virtual_interface_get_by_address(self):
+ vifs = [self._create_virt_interface({'address': 'first'}),
+ self._create_virt_interface({'address': 'second'})]
+ for vif in vifs:
+ real_vif = db.virtual_interface_get_by_address(self.ctxt,
+ vif['address'])
+ self._assertEqualObjects(vif, real_vif)
+
+ def test_virtual_interface_get_by_address_not_found(self):
+ self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
+ "i.nv.ali.ip"))
+
+ def test_virtual_interface_get_by_address_data_error_exception(self):
+ self.mock_db_query_first_to_raise_data_error_exception()
+ self.assertRaises(exception.InvalidIpAddressError,
+ db.virtual_interface_get_by_address,
+ self.ctxt,
+ "i.nv.ali.ip")
+
+ def test_virtual_interface_get_by_uuid(self):
+ vifs = [self._create_virt_interface({"address": "address_1"}),
+ self._create_virt_interface({"address": "address_2"})]
+ for vif in vifs:
+ real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
+ self._assertEqualObjects(vif, real_vif)
+
+ def test_virtual_interface_get_by_instance(self):
+ inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
+ vifs1 = [self._create_virt_interface({'address': 'fake1'}),
+ self._create_virt_interface({'address': 'fake2'})]
+ # multiple NICs on the same instance
+ vifs2 = [self._create_virt_interface({'address': 'fake3',
+ 'instance_uuid': inst_uuid2}),
+ self._create_virt_interface({'address': 'fake4',
+ 'instance_uuid': inst_uuid2})]
+ vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
+ self.instance_uuid)
+ vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
+ inst_uuid2)
+ self._assertEqualListsOfObjects(vifs1, vifs1_real)
+ self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)
+
+ def test_virtual_interface_get_by_instance_and_network(self):
+ inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
+ values = {'host': 'localhost', 'project_id': 'project2'}
+ network_id = db.network_create_safe(self.ctxt, values)['id']
+
+ vifs = [self._create_virt_interface({'address': 'fake1'}),
+ self._create_virt_interface({'address': 'fake2',
+ 'network_id': network_id,
+ 'instance_uuid': inst_uuid2}),
+ self._create_virt_interface({'address': 'fake3',
+ 'instance_uuid': inst_uuid2})]
+ for vif in vifs:
+ params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
+ r_vif = db.virtual_interface_get_by_instance_and_network(*params)
+ self._assertEqualObjects(r_vif, vif)
+
+ def test_virtual_interface_delete_by_instance(self):
+ inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
+
+ values = [dict(address='fake1'), dict(address='fake2'),
+ dict(address='fake3', instance_uuid=inst_uuid2)]
+ for vals in values:
+ self._create_virt_interface(vals)
+
+ db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
+
+ real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
+ self.instance_uuid)
+ real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
+ inst_uuid2)
+ self.assertEqual(len(real_vifs1), 0)
+ self.assertEqual(len(real_vifs2), 1)
+
+ def test_virtual_interface_get_all(self):
+ inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
+ values = [dict(address='fake1'), dict(address='fake2'),
+ dict(address='fake3', instance_uuid=inst_uuid2)]
+
+ vifs = [self._create_virt_interface(val) for val in values]
+ real_vifs = db.virtual_interface_get_all(self.ctxt)
+ self._assertEqualListsOfObjects(vifs, real_vifs)
+
+
+class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.network_* methods."""
+
+ def setUp(self):
+ super(NetworkTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_associated_fixed_ip(self, host, cidr, ip):
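+ # Helper: create a network, an instance on the given host, a vif and
+ # an allocated fixed IP, then associate that IP with the instance.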
+ network = db.network_create_safe(self.ctxt,
+ {'project_id': 'project1', 'cidr': cidr})
+ self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
+ host))
+ instance = db.instance_create(self.ctxt,
+ {'project_id': 'project1', 'host': host})
+ virtual_interface = db.virtual_interface_create(self.ctxt,
+ {'instance_uuid': instance.uuid, 'network_id': network.id,
+ 'address': ip})
+ db.fixed_ip_create(self.ctxt, {'address': ip,
+ 'network_id': network.id, 'allocated': True,
+ 'virtual_interface_id': virtual_interface.id})
+ db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
+ network.id)
+ return network, instance
+
+ def test_network_get_associated_default_route(self):
+ network, instance = self._get_associated_fixed_ip('host.net',
+ '192.0.2.0/30', '192.0.2.1')
+ network2 = db.network_create_safe(self.ctxt,
+ {'project_id': 'project1', 'cidr': '192.0.3.0/30'})
+ ip = '192.0.3.1'
+ virtual_interface = db.virtual_interface_create(self.ctxt,
+ {'instance_uuid': instance.uuid, 'network_id': network2.id,
+ 'address': ip})
+ db.fixed_ip_create(self.ctxt, {'address': ip,
+ 'network_id': network2.id, 'allocated': True,
+ 'virtual_interface_id': virtual_interface.id})
+ db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
+ network2.id)
+ data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
+ self.assertEqual(1, len(data))
+ self.assertTrue(data[0]['default_route'])
+ data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
+ self.assertEqual(1, len(data))
+ self.assertFalse(data[0]['default_route'])
+
+ def test_network_get_associated_fixed_ips(self):
+ network, instance = self._get_associated_fixed_ip('host.net',
+ '192.0.2.0/30', '192.0.2.1')
+ data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
+ self.assertEqual(1, len(data))
+ self.assertEqual('192.0.2.1', data[0]['address'])
+ self.assertEqual('192.0.2.1', data[0]['vif_address'])
+ self.assertEqual(instance.uuid, data[0]['instance_uuid'])
+ self.assertTrue(data[0]['allocated'])
+
+ def test_network_create_safe(self):
+ values = {'host': 'localhost', 'project_id': 'project1'}
+ network = db.network_create_safe(self.ctxt, values)
+ self.assertEqual(36, len(network['uuid']))
+ db_network = db.network_get(self.ctxt, network['id'])
+ self._assertEqualObjects(network, db_network)
+
+ def test_network_create_with_duplicate_vlan(self):
+ values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
+ values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
+ db.network_create_safe(self.ctxt, values1)
+ self.assertRaises(exception.DuplicateVlan,
+ db.network_create_safe, self.ctxt, values2)
+
+ def test_network_delete_safe(self):
+ values = {'host': 'localhost', 'project_id': 'project1'}
+ network = db.network_create_safe(self.ctxt, values)
+ db.network_get(self.ctxt, network['id'])
+ values = {'network_id': network['id'], 'address': '192.168.1.5'}
+ address1 = db.fixed_ip_create(self.ctxt, values)['address']
+ values = {'network_id': network['id'],
+ 'address': '192.168.1.6',
+ 'allocated': True}
+ address2 = db.fixed_ip_create(self.ctxt, values)['address']
+ self.assertRaises(exception.NetworkInUse,
+ db.network_delete_safe, self.ctxt, network['id'])
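+ # Once the allocated address is released the network can be deleted,
+ # and its fixed IPs are soft-deleted along with it.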
+ db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
+ network = db.network_delete_safe(self.ctxt, network['id'])
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address, self.ctxt, address1)
+ ctxt = self.ctxt.elevated(read_deleted='yes')
+ fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
+ self.assertTrue(fixed_ip['deleted'])
+
+ def test_network_in_use_on_host(self):
+ values = {'host': 'foo', 'hostname': 'myname'}
+ instance = db.instance_create(self.ctxt, values)
+ values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
+ vif = db.virtual_interface_create(self.ctxt, values)
+ values = {'address': '192.168.1.6',
+ 'network_id': 1,
+ 'allocated': True,
+ 'instance_uuid': instance['uuid'],
+ 'virtual_interface_id': vif['id']}
+ db.fixed_ip_create(self.ctxt, values)
+ self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'foo'), True)
+ self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'bar'), False)
+
+ def test_network_update_nonexistent(self):
+ self.assertRaises(exception.NetworkNotFound,
+ db.network_update, self.ctxt, 123456, {})
+
+ def test_network_update_with_duplicate_vlan(self):
+ values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
+ values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
+ network_ref = db.network_create_safe(self.ctxt, values1)
+ db.network_create_safe(self.ctxt, values2)
+ self.assertRaises(exception.DuplicateVlan,
+ db.network_update, self.ctxt,
+ network_ref["id"], values2)
+
+ def test_network_update(self):
+ network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
+ 'vlan': 1, 'host': 'test.com'})
+ db.network_update(self.ctxt, network.id, {'vlan': 2})
+ network_new = db.network_get(self.ctxt, network.id)
+ self.assertEqual(2, network_new.vlan)
+
+ def test_network_set_host_nonexistent_network(self):
+ self.assertRaises(exception.NetworkNotFound,
+ db.network_set_host, self.ctxt, 123456, 'nonexistent')
+
+ def test_network_set_host_with_initially_no_host(self):
+ values = {'host': 'example.com', 'project_id': 'project1'}
+ network = db.network_create_safe(self.ctxt, values)
+ self.assertEqual(
+ db.network_set_host(self.ctxt, network.id, 'new.example.com'),
+ 'example.com')
+
+ def test_network_set_host(self):
+ values = {'project_id': 'project1'}
+ network = db.network_create_safe(self.ctxt, values)
+ self.assertEqual(
+ db.network_set_host(self.ctxt, network.id, 'example.com'),
+ 'example.com')
+ self.assertEqual('example.com',
+ db.network_get(self.ctxt, network.id).host)
+
+ def test_network_get_all_by_host(self):
+ self.assertEqual([],
+ db.network_get_all_by_host(self.ctxt, 'example.com'))
+ host = 'h1.example.com'
+ # network with host set
+ net1 = db.network_create_safe(self.ctxt, {'host': host})
+ self._assertEqualListsOfObjects([net1],
+ db.network_get_all_by_host(self.ctxt, host))
+ # network with fixed ip with host set
+ net2 = db.network_create_safe(self.ctxt, {})
+ db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
+ db.network_get_all_by_host(self.ctxt, host)
+ self._assertEqualListsOfObjects([net1, net2],
+ db.network_get_all_by_host(self.ctxt, host))
+ # network with instance with host set
+ net3 = db.network_create_safe(self.ctxt, {})
+ instance = db.instance_create(self.ctxt, {'host': host})
+ db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
+ 'instance_uuid': instance.uuid})
+ self._assertEqualListsOfObjects([net1, net2, net3],
+ db.network_get_all_by_host(self.ctxt, host))
+
+ def test_network_get_by_cidr(self):
+ cidr = '192.0.2.0/30'
+ cidr_v6 = '2001:db8:1::/64'
+ network = db.network_create_safe(self.ctxt,
+ {'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
+ self._assertEqualObjects(network,
+ db.network_get_by_cidr(self.ctxt, cidr))
+ self._assertEqualObjects(network,
+ db.network_get_by_cidr(self.ctxt, cidr_v6))
+
+ def test_network_get_by_cidr_nonexistent(self):
+ self.assertRaises(exception.NetworkNotFoundForCidr,
+ db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
+
+ def test_network_get_by_uuid(self):
+ network = db.network_create_safe(self.ctxt,
+ {'project_id': 'project_1'})
+ self._assertEqualObjects(network,
+ db.network_get_by_uuid(self.ctxt, network.uuid))
+
+ def test_network_get_by_uuid_nonexistent(self):
+ self.assertRaises(exception.NetworkNotFoundForUUID,
+ db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
+
+ def test_network_get_all_by_uuids_no_networks(self):
+ self.assertRaises(exception.NoNetworksFound,
+ db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
+
+ def test_network_get_all_by_uuids(self):
+ net1 = db.network_create_safe(self.ctxt, {})
+ net2 = db.network_create_safe(self.ctxt, {})
+ self._assertEqualListsOfObjects([net1, net2],
+ db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
+
+ def test_network_get_all_no_networks(self):
+ self.assertRaises(exception.NoNetworksFound,
+ db.network_get_all, self.ctxt)
+
+ def test_network_get_all(self):
+ network = db.network_create_safe(self.ctxt, {})
+ network_db = db.network_get_all(self.ctxt)
+ self.assertEqual(1, len(network_db))
+ self._assertEqualObjects(network, network_db[0])
+
+ def test_network_get_all_admin_user(self):
+ network1 = db.network_create_safe(self.ctxt, {})
+ network2 = db.network_create_safe(self.ctxt,
+ {'project_id': 'project1'})
+ self._assertEqualListsOfObjects([network1, network2],
+ db.network_get_all(self.ctxt,
+ project_only=True))
+
+ def test_network_get_all_normal_user(self):
+ normal_ctxt = context.RequestContext('fake', 'fake')
+ db.network_create_safe(self.ctxt, {})
+ db.network_create_safe(self.ctxt, {'project_id': 'project1'})
+ network1 = db.network_create_safe(self.ctxt,
+ {'project_id': 'fake'})
+ network_db = db.network_get_all(normal_ctxt, project_only=True)
+ self.assertEqual(1, len(network_db))
+ self._assertEqualObjects(network1, network_db[0])
+
+ def test_network_get(self):
+ network = db.network_create_safe(self.ctxt, {})
+ self._assertEqualObjects(db.network_get(self.ctxt, network.id),
+ network)
+ db.network_delete_safe(self.ctxt, network.id)
+ self.assertRaises(exception.NetworkNotFound,
+ db.network_get, self.ctxt, network.id)
+
+ def test_network_associate(self):
+ network = db.network_create_safe(self.ctxt, {})
+ self.assertIsNone(network.project_id)
+ db.network_associate(self.ctxt, "project1", network.id)
+ self.assertEqual("project1", db.network_get(self.ctxt,
+ network.id).project_id)
+
+ def test_network_disassociate(self):
+ network = db.network_create_safe(self.ctxt,
+ {'project_id': 'project1', 'host': 'test.net'})
+ # disassociate project
+ db.network_disassociate(self.ctxt, network.id, False, True)
+ self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
+ # disassociate host
+ db.network_disassociate(self.ctxt, network.id, True, False)
+ self.assertIsNone(db.network_get(self.ctxt, network.id).host)
+
+ def test_network_count_reserved_ips(self):
+ net = db.network_create_safe(self.ctxt, {})
+ self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
+ db.fixed_ip_create(self.ctxt, {'network_id': net.id,
+ 'reserved': True})
+ self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
+
+
+class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(KeyPairTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _create_key_pair(self, values):
+ return db.key_pair_create(self.ctxt, values)
+
+ def test_key_pair_create(self):
+ param = {
+ 'name': 'test_1',
+ 'user_id': 'test_user_id_1',
+ 'public_key': 'test_public_key_1',
+ 'fingerprint': 'test_fingerprint_1'
+ }
+ key_pair = self._create_key_pair(param)
+
+ self.assertIsNotNone(key_pair['id'])
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id']
+ self._assertEqualObjects(key_pair, param, ignored_keys)
+
+ def test_key_pair_create_with_duplicate_name(self):
+ params = {'name': 'test_name', 'user_id': 'test_user_id'}
+ self._create_key_pair(params)
+ self.assertRaises(exception.KeyPairExists, self._create_key_pair,
+ params)
+
+ def test_key_pair_get(self):
+ params = [
+ {'name': 'test_1', 'user_id': 'test_user_id_1'},
+ {'name': 'test_2', 'user_id': 'test_user_id_2'},
+ {'name': 'test_3', 'user_id': 'test_user_id_3'}
+ ]
+ key_pairs = [self._create_key_pair(p) for p in params]
+
+ for key in key_pairs:
+ real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
+ self._assertEqualObjects(key, real_key)
+
+ def test_key_pair_get_no_results(self):
+ param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
+ self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
+ self.ctxt, param['user_id'], param['name'])
+
+ def test_key_pair_get_deleted(self):
+ param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
+ key_pair_created = self._create_key_pair(param)
+
+ db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
+ self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
+ self.ctxt, param['user_id'], param['name'])
+
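+ # With read_deleted='yes' the soft-deleted keypair is still visible;
+ # soft delete sets the 'deleted' column to the row's id.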
+ ctxt = self.ctxt.elevated(read_deleted='yes')
+ key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
+ param['name'])
+ ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
+ self._assertEqualObjects(key_pair_deleted, key_pair_created,
+ ignored_keys)
+ self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
+
+ def test_key_pair_get_all_by_user(self):
+ params = [
+ {'name': 'test_1', 'user_id': 'test_user_id_1'},
+ {'name': 'test_2', 'user_id': 'test_user_id_1'},
+ {'name': 'test_3', 'user_id': 'test_user_id_2'}
+ ]
+ key_pairs_user_1 = [self._create_key_pair(p) for p in params
+ if p['user_id'] == 'test_user_id_1']
+ key_pairs_user_2 = [self._create_key_pair(p) for p in params
+ if p['user_id'] == 'test_user_id_2']
+
+ real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
+ real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
+
+ self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
+ self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
+
+ def test_key_pair_count_by_user(self):
+ params = [
+ {'name': 'test_1', 'user_id': 'test_user_id_1'},
+ {'name': 'test_2', 'user_id': 'test_user_id_1'},
+ {'name': 'test_3', 'user_id': 'test_user_id_2'}
+ ]
+ for p in params:
+ self._create_key_pair(p)
+
+ count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
+ self.assertEqual(count_1, 2)
+
+ count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
+ self.assertEqual(count_2, 1)
+
+ def test_key_pair_destroy(self):
+ param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
+ self._create_key_pair(param)
+
+ db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
+ self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
+ self.ctxt, param['user_id'], param['name'])
+
+ def test_key_pair_destroy_no_such_key(self):
+ param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
+ self.assertRaises(exception.KeypairNotFound,
+ db.key_pair_destroy, self.ctxt,
+ param['user_id'], param['name'])
+
+
+class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.quota_* methods."""
+
+ def setUp(self):
+ super(QuotaTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def test_quota_create(self):
+ quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+ self.assertEqual(quota.resource, 'resource')
+ self.assertEqual(quota.hard_limit, 99)
+ self.assertEqual(quota.project_id, 'project1')
+
+ def test_quota_get(self):
+ quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+ quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
+ self._assertEqualObjects(quota, quota_db)
+
+ def test_quota_get_all_by_project(self):
+ for i in range(3):
+ for j in range(3):
+ db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
+ for i in range(3):
+ quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
+ self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
+ 'resource0': 0,
+ 'resource1': 1,
+ 'resource2': 2})
+
+ def test_quota_get_all_by_project_and_user(self):
+ for i in range(3):
+ for j in range(3):
+ db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
+ j - 1, user_id='user%d' % i)
+ for i in range(3):
+ quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
+ 'proj%d' % i,
+ 'user%d' % i)
+ self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
+ 'user_id': 'user%d' % i,
+ 'resource0': -1,
+ 'resource1': 0,
+ 'resource2': 1})
+
+ def test_quota_update(self):
+ db.quota_create(self.ctxt, 'project1', 'resource1', 41)
+ db.quota_update(self.ctxt, 'project1', 'resource1', 42)
+ quota = db.quota_get(self.ctxt, 'project1', 'resource1')
+ self.assertEqual(quota.hard_limit, 42)
+ self.assertEqual(quota.resource, 'resource1')
+ self.assertEqual(quota.project_id, 'project1')
+
+ def test_quota_update_nonexistent(self):
+ self.assertRaises(exception.ProjectQuotaNotFound,
+ db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+
+ def test_quota_get_nonexistent(self):
+ self.assertRaises(exception.ProjectQuotaNotFound,
+ db.quota_get, self.ctxt, 'project1', 'resource1')
+
+ def test_quota_reserve_all_resources(self):
+ quotas = {}
+ deltas = {}
+ reservable_resources = {}
+ for i, resource in enumerate(quota.resources):
+ if isinstance(resource, quota.ReservableResource):
+ quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
+ resource.name, 100)
+ deltas[resource.name] = i
+ reservable_resources[resource.name] = resource
+
+ usages = {'instances': 3, 'cores': 6, 'ram': 9}
+ instances = []
+ for i in range(3):
+ instances.append(db.instance_create(self.ctxt,
+ {'vcpus': 2, 'memory_mb': 3,
+ 'project_id': 'project1'}))
+
+ usages['fixed_ips'] = 2
+ network = db.network_create_safe(self.ctxt, {})
+ for i in range(2):
+ address = '192.168.0.%d' % i
+ db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
+ 'address': address,
+ 'network_id': network['id']})
+ db.fixed_ip_associate(self.ctxt, address,
+ instances[0].uuid, network['id'])
+
+ usages['floating_ips'] = 5
+ for i in range(5):
+ db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
+
+ usages['security_groups'] = 3
+ for i in range(3):
+ db.security_group_create(self.ctxt, {'project_id': 'project1'})
+
+ usages['server_groups'] = 4
+ for i in range(4):
+ db.instance_group_create(self.ctxt, {'uuid': str(i),
+ 'project_id': 'project1'})
+
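+ # Reserve every reservable resource at once and verify the synced
+ # in_use values match the usage created above.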
+ reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
+ quotas, quotas, deltas, None,
+ None, None, 'project1')
+ resources_names = reservable_resources.keys()
+ for reservation_uuid in reservations_uuids:
+ reservation = _reservation_get(self.ctxt, reservation_uuid)
+ usage = db.quota_usage_get(self.ctxt, 'project1',
+ reservation.resource)
+ self.assertEqual(usage.in_use, usages[reservation.resource],
+ 'Resource: %s' % reservation.resource)
+ self.assertEqual(usage.reserved, deltas[reservation.resource])
+ self.assertIn(reservation.resource, resources_names)
+ resources_names.remove(reservation.resource)
+ self.assertEqual(len(resources_names), 0)
+
+ def test_quota_destroy_all_by_project(self):
+ reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
+ db.quota_destroy_all_by_project(self.ctxt, 'project1')
+ self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
+ {'project_id': 'project1'})
+ self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
+ 'project1', 'user1'),
+ {'project_id': 'project1', 'user_id': 'user1'})
+ self.assertEqual(db.quota_usage_get_all_by_project(
+ self.ctxt, 'project1'),
+ {'project_id': 'project1'})
+ for r in reservations:
+ self.assertRaises(exception.ReservationNotFound,
+ _reservation_get, self.ctxt, r)
+
+ def test_quota_destroy_all_by_project_and_user(self):
+ reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
+ db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
+ 'user1')
+ self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
+ 'project1', 'user1'),
+ {'project_id': 'project1',
+ 'user_id': 'user1'})
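+ # fixed_ips usage remains because it is tracked per project rather
+ # than per user, so the per-user destroy leaves it in place.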
+ self.assertEqual(db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'project1', 'user1'),
+ {'project_id': 'project1',
+ 'user_id': 'user1',
+ 'fixed_ips': {'in_use': 2, 'reserved': 2}})
+ for r in reservations:
+ self.assertRaises(exception.ReservationNotFound,
+ _reservation_get, self.ctxt, r)
+
+ def test_quota_usage_get_nonexistent(self):
+ self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
+ self.ctxt, 'p1', 'nonexistent_resource')
+
+ def test_quota_usage_get(self):
+ _quota_reserve(self.ctxt, 'p1', 'u1')
+ quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
+ expected = {'resource': 'resource0', 'project_id': 'p1',
+ 'in_use': 0, 'reserved': 0, 'total': 0}
+ for key, value in expected.iteritems():
+ self.assertEqual(value, quota_usage[key])
+
+ def test_quota_usage_get_all_by_project(self):
+ _quota_reserve(self.ctxt, 'p1', 'u1')
+ expected = {'project_id': 'p1',
+ 'resource0': {'in_use': 0, 'reserved': 0},
+ 'resource1': {'in_use': 1, 'reserved': 1},
+ 'fixed_ips': {'in_use': 2, 'reserved': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project(
+ self.ctxt, 'p1'))
+
+ def test_quota_usage_get_all_by_project_and_user(self):
+ _quota_reserve(self.ctxt, 'p1', 'u1')
+ expected = {'project_id': 'p1',
+ 'user_id': 'u1',
+ 'resource0': {'in_use': 0, 'reserved': 0},
+ 'resource1': {'in_use': 1, 'reserved': 1},
+ 'fixed_ips': {'in_use': 2, 'reserved': 2}}
+ self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
+ self.ctxt, 'p1', 'u1'))
+
+ def test_quota_usage_update_nonexistent(self):
+ self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
+ self.ctxt, 'p1', 'u1', 'resource', in_use=42)
+
+ def test_quota_usage_update(self):
+ _quota_reserve(self.ctxt, 'p1', 'u1')
+ db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
+ reserved=43)
+ quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
+ expected = {'resource': 'resource0', 'project_id': 'p1',
+ 'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
+ for key, value in expected.iteritems():
+ self.assertEqual(value, quota_usage[key])
+
+ def test_quota_create_exists(self):
+ db.quota_create(self.ctxt, 'project1', 'resource1', 41)
+ self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
+ 'project1', 'resource1', 42)
+
+
+class QuotaReserveNoDbTestCase(test.NoDBTestCase):
+ """Tests quota reserve/refresh operations using mock."""
+
+ def test_create_quota_usage_if_missing_not_created(self):
+ # Tests that QuotaUsage isn't created if it's already in user_usages.
+ resource = 'fake-resource'
+ project_id = 'fake-project'
+ user_id = 'fake_user'
+ session = mock.sentinel
+ quota_usage = mock.sentinel
+ user_usages = {resource: quota_usage}
+ with mock.patch.object(sqlalchemy_api, '_quota_usage_create') as quc:
+ self.assertFalse(sqlalchemy_api._create_quota_usage_if_missing(
+ user_usages, resource, None,
+ project_id, user_id, session))
+ self.assertFalse(quc.called)
+
+ def _test_create_quota_usage_if_missing_created(self, per_project_quotas):
+ # Tests that the QuotaUsage is created.
+ user_usages = {}
+ if per_project_quotas:
+ resource = sqlalchemy_api.PER_PROJECT_QUOTAS[0]
+ else:
+ resource = 'fake-resource'
+ project_id = 'fake-project'
+ user_id = 'fake_user'
+ session = mock.sentinel
+ quota_usage = mock.sentinel
+ with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
+ return_value=quota_usage) as quc:
+ self.assertTrue(sqlalchemy_api._create_quota_usage_if_missing(
+ user_usages, resource, None,
+ project_id, user_id, session))
+ self.assertEqual(quota_usage, user_usages[resource])
+ # Now test if the QuotaUsage was created with a user_id or not.
+ if per_project_quotas:
+ quc.assert_called_once_with(
+ project_id, None, resource, 0, 0, None, session=session)
+ else:
+ quc.assert_called_once_with(
+ project_id, user_id, resource, 0, 0, None, session=session)
+
+ def test_create_quota_usage_if_missing_created_per_project_quotas(self):
+ self._test_create_quota_usage_if_missing_created(True)
+
+ def test_create_quota_usage_if_missing_created_user_quotas(self):
+ self._test_create_quota_usage_if_missing_created(False)
+
+ def test_is_quota_refresh_needed_in_use(self):
+ # Tests when a quota refresh is needed based on the in_use value.
+ for in_use in range(-1, 1):
+ # We have to set until_refresh=None, otherwise MagicMock would
+ # auto-create a value and the helper would take the refresh-counter
+ # path that this test does not exercise.
+ quota_usage = mock.MagicMock(in_use=in_use, until_refresh=None)
+ if in_use < 0:
+ self.assertTrue(sqlalchemy_api._is_quota_refresh_needed(
+ quota_usage, max_age=0))
+ else:
+ self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(
+ quota_usage, max_age=0))
+
+ def test_is_quota_refresh_needed_until_refresh_none(self):
+ quota_usage = mock.MagicMock(in_use=0, until_refresh=None)
+ self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(quota_usage,
+ max_age=0))
+
+ def test_is_quota_refresh_needed_until_refresh_not_none(self):
+ # Tests different values for the until_refresh counter.
+ for until_refresh in range(3):
+ quota_usage = mock.MagicMock(in_use=0, until_refresh=until_refresh)
+ refresh = sqlalchemy_api._is_quota_refresh_needed(quota_usage,
+ max_age=0)
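+ # The helper is expected to decrement until_refresh itself; mirror
+ # that here so the comparison below matches what it left on
+ # quota_usage.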
+ until_refresh -= 1
+ if until_refresh <= 0:
+ self.assertTrue(refresh)
+ else:
+ self.assertFalse(refresh)
+ self.assertEqual(until_refresh, quota_usage.until_refresh)
+
+ def test_refresh_quota_usages(self):
+ quota_usage = mock.Mock(spec=models.QuotaUsage)
+ quota_usage.in_use = 5
+ quota_usage.until_refresh = None
+ sqlalchemy_api._refresh_quota_usages(quota_usage, until_refresh=5,
+ in_use=6)
+ self.assertEqual(6, quota_usage.in_use)
+ self.assertEqual(5, quota_usage.until_refresh)
+
+ def test_calculate_overquota_no_delta(self):
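+ # A negative delta frees quota, so nothing should be reported as
+ # over quota.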
+ deltas = {'foo': -1}
+ user_quotas = {'foo': 10}
+ overs = sqlalchemy_api._calculate_overquota({}, user_quotas, deltas,
+ {}, {})
+ self.assertFalse(overs)
+
+ def test_calculate_overquota_unlimited_quota(self):
+ deltas = {'foo': 1}
+ project_quotas = {}
+ user_quotas = {'foo': -1}
+ project_usages = {}
+ user_usages = {'foo': 10}
+ overs = sqlalchemy_api._calculate_overquota(
+ project_quotas, user_quotas, deltas, project_usages, user_usages)
+ self.assertFalse(overs)
+
+ def _test_calculate_overquota(self, resource, project_usages, user_usages):
+ deltas = {resource: 1}
+ project_quotas = {resource: 10}
+ user_quotas = {resource: 10}
+ overs = sqlalchemy_api._calculate_overquota(
+ project_quotas, user_quotas, deltas, project_usages, user_usages)
+ self.assertEqual(resource, overs[0])
+
+ def test_calculate_overquota_per_project_quota_overquota(self):
+ # In this test, user quotas are fine but project quotas are over.
+ resource = 'foo'
+ project_usages = {resource: {'total': 10}}
+ user_usages = {resource: {'total': 5}}
+ self._test_calculate_overquota(resource, project_usages, user_usages)
+
+ def test_calculate_overquota_per_user_quota_overquota(self):
+ # In this test, project quotas are fine but user quotas are over.
+ resource = 'foo'
+ project_usages = {resource: {'total': 5}}
+ user_usages = {resource: {'total': 10}}
+ self._test_calculate_overquota(resource, project_usages, user_usages)
+
+
+class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(QuotaClassTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def test_quota_class_get_default(self):
+ params = {
+ 'test_resource1': '10',
+ 'test_resource2': '20',
+ 'test_resource3': '30',
+ }
+ for res, limit in params.items():
+ db.quota_class_create(self.ctxt, 'default', res, limit)
+
+ defaults = db.quota_class_get_default(self.ctxt)
+ self.assertEqual(defaults, dict(class_name='default',
+ test_resource1=10,
+ test_resource2=20,
+ test_resource3=30))
+
+ def test_quota_class_create(self):
+ qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
+ self.assertEqual(qc.class_name, 'class name')
+ self.assertEqual(qc.resource, 'resource')
+ self.assertEqual(qc.hard_limit, 42)
+
+ def test_quota_class_get(self):
+ qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
+ qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
+ self._assertEqualObjects(qc, qc_db)
+
+ def test_quota_class_get_nonexistent(self):
+ self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
+ self.ctxt, 'nonexistent', 'resource')
+
+ def test_quota_class_get_all_by_name(self):
+ for i in range(3):
+ for j in range(3):
+ db.quota_class_create(self.ctxt, 'class%d' % i,
+ 'resource%d' % j, j)
+ for i in range(3):
+ classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
+ self.assertEqual(classes, {'class_name': 'class%d' % i,
+ 'resource0': 0, 'resource1': 1, 'resource2': 2})
+
+ def test_quota_class_update(self):
+ db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
+ db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
+ self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
+ 'resource').hard_limit, 43)
+
+ def test_quota_class_update_nonexistent(self):
+ self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
+ self.ctxt, 'class name', 'resource', 42)
+
+ def test_refresh_quota_usages(self):
+ quota_usages = mock.Mock()
+ sqlalchemy_api._refresh_quota_usages(quota_usages, until_refresh=5,
+ in_use=6)
+
+
+class S3ImageTestCase(test.TestCase):
+
+ def setUp(self):
+ super(S3ImageTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.values = [uuidutils.generate_uuid() for i in xrange(3)]
+ self.images = [db.s3_image_create(self.ctxt, uuid)
+ for uuid in self.values]
+
+ def test_s3_image_create(self):
+ for ref in self.images:
+ self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
+ self.assertEqual(sorted(self.values),
+ sorted([ref.uuid for ref in self.images]))
+
+ def test_s3_image_get_by_uuid(self):
+ for uuid in self.values:
+ ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
+ self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
+ self.assertEqual(uuid, ref.uuid)
+
+ def test_s3_image_get(self):
+ self.assertEqual(sorted(self.values),
+ sorted([db.s3_image_get(self.ctxt, ref.id).uuid
+ for ref in self.images]))
+
+ def test_s3_image_get_not_found(self):
+ self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
+ 100500)
+
+ def test_s3_image_get_by_uuid_not_found(self):
+ self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
+ self.ctxt, uuidutils.generate_uuid())
+
+
+class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+
+ def setUp(self):
+ super(ComputeNodeTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.service_dict = dict(host='host1', binary='nova-compute',
+ topic=CONF.compute_topic, report_count=1,
+ disabled=False)
+ self.service = db.service_create(self.ctxt, self.service_dict)
+ self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
+ vcpus_used=0, memory_mb_used=0,
+ local_gb_used=0, free_ram_mb=1024,
+ free_disk_gb=2048, hypervisor_type="xen",
+ hypervisor_version=1, cpu_info="",
+ running_vms=0, current_workload=0,
+ service_id=self.service['id'],
+ disk_available_least=100,
+ hypervisor_hostname='abracadabra104',
+ host_ip='127.0.0.1',
+ supported_instances='',
+ pci_stats='',
+ metrics='',
+ extra_resources='',
+ stats='', numa_topology='')
+ # add some random stats
+ self.stats = dict(num_instances=3, num_proj_12345=2,
+ num_proj_23456=2, num_vm_building=3)
+ self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
+ self.flags(reserved_host_memory_mb=0)
+ self.flags(reserved_host_disk_mb=0)
+ self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
+
+ def test_compute_node_create(self):
+ self._assertEqualObjects(self.compute_node_dict, self.item,
+ ignored_keys=self._ignored_keys + ['stats'])
+ new_stats = jsonutils.loads(self.item['stats'])
+ self.assertEqual(self.stats, new_stats)
+
+ def test_compute_node_get_all(self):
+ date_fields = set(['created_at', 'updated_at',
+ 'deleted_at', 'deleted'])
+ for no_date_fields in [False, True]:
+ nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
+ self.assertEqual(1, len(nodes))
+ node = nodes[0]
+ self._assertEqualObjects(self.compute_node_dict, node,
+ ignored_keys=self._ignored_keys +
+ ['stats', 'service'])
+ node_fields = set(node.keys())
+ if no_date_fields:
+ self.assertFalse(date_fields & node_fields)
+ else:
+ self.assertTrue(date_fields <= node_fields)
+ new_stats = jsonutils.loads(node['stats'])
+ self.assertEqual(self.stats, new_stats)
+
+ def test_compute_node_get_all_deleted_compute_node(self):
+ # Create a service and compute node and ensure we can find its stats;
+ # delete the service and compute node when done and loop again
+ for x in range(2, 5):
+ # Create a service
+ service_data = self.service_dict.copy()
+ service_data['host'] = 'host-%s' % x
+ service = db.service_create(self.ctxt, service_data)
+
+ # Create a compute node
+ compute_node_data = self.compute_node_dict.copy()
+ compute_node_data['service_id'] = service['id']
+ compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
+ compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
+ node = db.compute_node_create(self.ctxt, compute_node_data)
+
+ # Ensure the "new" compute node is found
+ nodes = db.compute_node_get_all(self.ctxt, False)
+ self.assertEqual(2, len(nodes))
+ found = None
+ for n in nodes:
+ if n['id'] == node['id']:
+ found = n
+ break
+ self.assertIsNotNone(found)
+ # Now ensure the match has stats!
+ self.assertNotEqual(jsonutils.loads(found['stats']), {})
+
+ # Now delete the newly-created compute node to ensure the related
+ # compute node stats are wiped in a cascaded fashion
+ db.compute_node_delete(self.ctxt, node['id'])
+
+ # Clean up the service
+ db.service_destroy(self.ctxt, service['id'])
+
+ def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
+ service_data = self.service_dict.copy()
+ service_data['host'] = 'host2'
+ service = db.service_create(self.ctxt, service_data)
+
+ existing_node = dict(self.item.iteritems())
+ existing_node['service'] = dict(self.service.iteritems())
+ expected = [existing_node]
+
+ for name in ['bm_node1', 'bm_node2']:
+ compute_node_data = self.compute_node_dict.copy()
+ compute_node_data['service_id'] = service['id']
+ compute_node_data['stats'] = jsonutils.dumps(self.stats)
+ compute_node_data['hypervisor_hostname'] = name
+ node = db.compute_node_create(self.ctxt, compute_node_data)
+
+ node = dict(node.iteritems())
+ node['service'] = dict(service.iteritems())
+
+ expected.append(node)
+
+ result = sorted(db.compute_node_get_all(self.ctxt, False),
+ key=lambda n: n['hypervisor_hostname'])
+
+ self._assertEqualListsOfObjects(expected, result,
+ ignored_keys=['stats'])
+
+ def test_compute_node_get(self):
+ compute_node_id = self.item['id']
+ node = db.compute_node_get(self.ctxt, compute_node_id)
+ self._assertEqualObjects(self.compute_node_dict, node,
+ ignored_keys=self._ignored_keys + ['stats', 'service'])
+ new_stats = jsonutils.loads(node['stats'])
+ self.assertEqual(self.stats, new_stats)
+
+ def test_compute_node_update(self):
+ compute_node_id = self.item['id']
+ stats = jsonutils.loads(self.item['stats'])
+ # change some values:
+ stats['num_instances'] = 8
+ stats['num_tribbles'] = 1
+ values = {
+ 'vcpus': 4,
+ 'stats': jsonutils.dumps(stats),
+ }
+ item_updated = db.compute_node_update(self.ctxt, compute_node_id,
+ values)
+ self.assertEqual(4, item_updated['vcpus'])
+ new_stats = jsonutils.loads(item_updated['stats'])
+ self.assertEqual(stats, new_stats)
+
+ def test_compute_node_delete(self):
+ compute_node_id = self.item['id']
+ db.compute_node_delete(self.ctxt, compute_node_id)
+ nodes = db.compute_node_get_all(self.ctxt)
+ self.assertEqual(len(nodes), 0)
+
+ def test_compute_node_search_by_hypervisor(self):
+ nodes_created = []
+ new_service = copy.copy(self.service_dict)
+ for i in xrange(3):
+ new_service['binary'] += str(i)
+ new_service['topic'] += str(i)
+ service = db.service_create(self.ctxt, new_service)
+ self.compute_node_dict['service_id'] = service['id']
+ self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
+ self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
+ node = db.compute_node_create(self.ctxt, self.compute_node_dict)
+ nodes_created.append(node)
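+ # 'host' is a substring of every 'testhostN' hypervisor_hostname, so
+ # all three nodes created above should match.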
+ nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
+ self.assertEqual(3, len(nodes))
+ self._assertEqualListsOfObjects(nodes_created, nodes,
+ ignored_keys=self._ignored_keys + ['stats', 'service'])
+
+ def test_compute_node_statistics(self):
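+ # With a single compute node, every aggregated statistic should match
+ # that node's own values.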
+ stats = db.compute_node_statistics(self.ctxt)
+ self.assertEqual(stats.pop('count'), 1)
+ for k, v in stats.iteritems():
+ self.assertEqual(v, self.item[k])
+
+ def test_compute_node_statistics_disabled_service(self):
+ serv = db.service_get_by_host_and_topic(
+ self.ctxt, 'host1', CONF.compute_topic)
+ db.service_update(self.ctxt, serv['id'], {'disabled': True})
+ stats = db.compute_node_statistics(self.ctxt)
+ self.assertEqual(stats.pop('count'), 0)
+
+ def test_compute_node_not_found(self):
+ self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
+ self.ctxt, 100500)
+
+ def test_compute_node_update_always_updates_updated_at(self):
+ item_updated = db.compute_node_update(self.ctxt,
+ self.item['id'], {})
+ self.assertNotEqual(self.item['updated_at'],
+ item_updated['updated_at'])
+
+ def test_compute_node_update_override_updated_at(self):
+ # Update the record once so updated_at is set.
+ first = db.compute_node_update(self.ctxt, self.item['id'],
+ {'free_ram_mb': '12'})
+ self.assertIsNotNone(first['updated_at'])
+
+ # Update a second time. Make sure that the updated_at value we send
+ # is overridden.
+ second = db.compute_node_update(self.ctxt, self.item['id'],
+ {'updated_at': first.updated_at,
+ 'free_ram_mb': '13'})
+ self.assertNotEqual(first['updated_at'], second['updated_at'])
+
+
+class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(ProviderFwRuleTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.values = self._get_rule_values()
+ self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
+ for rule in self.values]
+
+ def _get_rule_values(self):
+ cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
+ '2001:4f8:3:ba::/64',
+ '2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
+ values = []
+ for i in xrange(len(cidr_samples)):
+ rule = {}
+ rule['protocol'] = 'foo' + str(i)
+ rule['from_port'] = 9999 + i
+ rule['to_port'] = 9898 + i
+ rule['cidr'] = cidr_samples[i]
+ values.append(rule)
+ return values
+
+ def test_provider_fw_rule_create(self):
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
+ 'updated_at']
+ for i, rule in enumerate(self.values):
+ self._assertEqualObjects(self.rules[i], rule,
+ ignored_keys=ignored_keys)
+
+ def test_provider_fw_rule_get_all(self):
+ self._assertEqualListsOfObjects(self.rules,
+ db.provider_fw_rule_get_all(self.ctxt))
+
+ def test_provider_fw_rule_destroy(self):
+ for rule in self.rules:
+ db.provider_fw_rule_destroy(self.ctxt, rule.id)
+ self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
+
+
+class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(CertificateTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.created = self._certificates_create()
+
+ def _get_certs_values(self):
+ base_values = {
+ 'user_id': 'user',
+ 'project_id': 'project',
+ 'file_name': 'filename'
+ }
+ return [dict((k, v + str(x)) for k, v in base_values.iteritems())
+ for x in xrange(1, 4)]
+
+ def _certificates_create(self):
+ return [db.certificate_create(self.ctxt, cert)
+ for cert in self._get_certs_values()]
+
+ def test_certificate_create(self):
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
+ 'updated_at']
+ for i, cert in enumerate(self._get_certs_values()):
+ self._assertEqualObjects(self.created[i], cert,
+ ignored_keys=ignored_keys)
+
+ def test_certificate_get_all_by_project(self):
+ cert = db.certificate_get_all_by_project(self.ctxt,
+ self.created[1].project_id)
+ self._assertEqualObjects(self.created[1], cert[0])
+
+ def test_certificate_get_all_by_user(self):
+ cert = db.certificate_get_all_by_user(self.ctxt,
+ self.created[1].user_id)
+ self._assertEqualObjects(self.created[1], cert[0])
+
+ def test_certificate_get_all_by_user_and_project(self):
+ cert = db.certificate_get_all_by_user_and_project(self.ctxt,
+ self.created[1].user_id, self.created[1].project_id)
+ self._assertEqualObjects(self.created[1], cert[0])
+
+
+class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(ConsoleTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ pools_data = [
+ {'address': '192.168.10.10',
+ 'username': 'user1',
+ 'password': 'passwd1',
+ 'console_type': 'type1',
+ 'public_hostname': 'public_host1',
+ 'host': 'host1',
+ 'compute_host': 'compute_host1',
+ },
+ {'address': '192.168.10.11',
+ 'username': 'user2',
+ 'password': 'passwd2',
+ 'console_type': 'type2',
+ 'public_hostname': 'public_host2',
+ 'host': 'host2',
+ 'compute_host': 'compute_host2',
+ },
+ ]
+ self.console_pools = [db.console_pool_create(self.ctxt, val)
+ for val in pools_data]
+ instance_uuid = uuidutils.generate_uuid()
+ db.instance_create(self.ctxt, {'uuid': instance_uuid})
+ self.console_data = [dict([('instance_name', 'name' + str(x)),
+ ('instance_uuid', instance_uuid),
+ ('password', 'pass' + str(x)),
+ ('port', 7878 + x),
+ ('pool_id', self.console_pools[x]['id'])])
+ for x in xrange(len(pools_data))]
+ self.consoles = [db.console_create(self.ctxt, val)
+ for val in self.console_data]
+
+ def test_console_create(self):
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
+ 'updated_at']
+ for console in self.consoles:
+ self.assertIsNotNone(console['id'])
+ self._assertEqualListsOfObjects(self.console_data, self.consoles,
+ ignored_keys=ignored_keys)
+
+ def test_console_get_by_id(self):
+ console = self.consoles[0]
+ console_get = db.console_get(self.ctxt, console['id'])
+ self._assertEqualObjects(console, console_get,
+ ignored_keys=['pool'])
+
+ def test_console_get_by_id_uuid(self):
+ console = self.consoles[0]
+ console_get = db.console_get(self.ctxt, console['id'],
+ console['instance_uuid'])
+ self._assertEqualObjects(console, console_get,
+ ignored_keys=['pool'])
+
+ def test_console_get_by_pool_instance(self):
+ console = self.consoles[0]
+ console_get = db.console_get_by_pool_instance(self.ctxt,
+ console['pool_id'], console['instance_uuid'])
+ self._assertEqualObjects(console, console_get,
+ ignored_keys=['pool'])
+
+ def test_console_get_all_by_instance(self):
+ instance_uuid = self.consoles[0]['instance_uuid']
+ consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfObjects(self.consoles, consoles_get)
+
+ def test_console_get_all_by_instance_with_pool(self):
+ instance_uuid = self.consoles[0]['instance_uuid']
+ consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
+ columns_to_join=['pool'])
+ self._assertEqualListsOfObjects(self.consoles, consoles_get,
+ ignored_keys=['pool'])
+ self._assertEqualListsOfObjects([pool for pool in self.console_pools],
+ [c['pool'] for c in consoles_get])
+
+ def test_console_get_all_by_instance_empty(self):
+ consoles_get = db.console_get_all_by_instance(self.ctxt,
+ uuidutils.generate_uuid())
+ self.assertEqual(consoles_get, [])
+
+ def test_console_delete(self):
+ console_id = self.consoles[0]['id']
+ db.console_delete(self.ctxt, console_id)
+ self.assertRaises(exception.ConsoleNotFound, db.console_get,
+ self.ctxt, console_id)
+
+ def test_console_get_by_pool_instance_not_found(self):
+ self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
+ db.console_get_by_pool_instance, self.ctxt,
+ self.consoles[0]['pool_id'],
+ uuidutils.generate_uuid())
+
+ def test_console_get_not_found(self):
+ self.assertRaises(exception.ConsoleNotFound, db.console_get,
+ self.ctxt, 100500)
+
+ def test_console_get_not_found_instance(self):
+ self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
+ self.ctxt, self.consoles[0]['id'],
+ uuidutils.generate_uuid())
+
+
+class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+
+ def setUp(self):
+ super(CellTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_cell_base_values(self):
+ return {
+ 'name': 'myname',
+ 'api_url': 'apiurl',
+ 'transport_url': 'transporturl',
+ 'weight_offset': 0.5,
+ 'weight_scale': 1.5,
+ 'is_parent': True,
+ }
+
+ def _cell_value_modify(self, value, step):
+ if isinstance(value, str):
+ return value + str(step)
+ elif isinstance(value, float):
+ return value + step + 0.6
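+ # bool must be checked before int: bool is a subclass of int.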
+ elif isinstance(value, bool):
+ return bool(step % 2)
+ elif isinstance(value, int):
+ return value + step
+
+ def _create_cells(self):
+ test_values = []
+ for x in xrange(1, 4):
+ modified_val = dict([(k, self._cell_value_modify(v, x))
+ for k, v in self._get_cell_base_values().iteritems()])
+ db.cell_create(self.ctxt, modified_val)
+ test_values.append(modified_val)
+ return test_values
+
+ def test_cell_create(self):
+ cell = db.cell_create(self.ctxt, self._get_cell_base_values())
+ self.assertIsNotNone(cell['id'])
+ self._assertEqualObjects(cell, self._get_cell_base_values(),
+ ignored_keys=self._ignored_keys)
+
+ def test_cell_update(self):
+ db.cell_create(self.ctxt, self._get_cell_base_values())
+ new_values = {
+ 'api_url': 'apiurl1',
+ 'transport_url': 'transporturl1',
+ 'weight_offset': 0.6,
+ 'weight_scale': 1.6,
+ 'is_parent': False,
+ }
+ test_cellname = self._get_cell_base_values()['name']
+ updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
+ self._assertEqualObjects(updated_cell, new_values,
+ ignored_keys=self._ignored_keys + ['name'])
+
+ def test_cell_delete(self):
+ new_cells = self._create_cells()
+ for cell in new_cells:
+ test_cellname = cell['name']
+ db.cell_delete(self.ctxt, test_cellname)
+ self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
+ test_cellname)
+
+ def test_cell_get(self):
+ new_cells = self._create_cells()
+ for cell in new_cells:
+ cell_get = db.cell_get(self.ctxt, cell['name'])
+ self._assertEqualObjects(cell_get, cell,
+ ignored_keys=self._ignored_keys)
+
+ def test_cell_get_all(self):
+ new_cells = self._create_cells()
+ cells = db.cell_get_all(self.ctxt)
+ self.assertEqual(len(new_cells), len(cells))
+ cells_byname = dict([(newcell['name'],
+ newcell) for newcell in new_cells])
+ for cell in cells:
+ self._assertEqualObjects(cell, cells_byname[cell['name']],
+ self._ignored_keys)
+
+ def test_cell_get_not_found(self):
+ self._create_cells()
+ self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
+ 'cellnotinbase')
+
+ def test_cell_update_not_found(self):
+ self._create_cells()
+ self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
+ 'cellnotinbase', self._get_cell_base_values())
+
+ def test_cell_create_exists(self):
+ db.cell_create(self.ctxt, self._get_cell_base_values())
+ self.assertRaises(exception.CellExists, db.cell_create,
+ self.ctxt, self._get_cell_base_values())
+
+
+class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(ConsolePoolTestCase, self).setUp()
+
+ self.ctxt = context.get_admin_context()
+ self.test_console_pool_1 = {
+ 'address': '192.168.2.10',
+ 'username': 'user_1',
+ 'password': 'secret_123',
+ 'console_type': 'type_1',
+ 'public_hostname': 'public_hostname_123',
+ 'host': 'localhost',
+ 'compute_host': '127.0.0.1',
+ }
+ self.test_console_pool_2 = {
+ 'address': '192.168.2.11',
+ 'username': 'user_2',
+ 'password': 'secret_1234',
+ 'console_type': 'type_2',
+ 'public_hostname': 'public_hostname_1234',
+ 'host': '127.0.0.1',
+ 'compute_host': 'localhost',
+ }
+ self.test_console_pool_3 = {
+ 'address': '192.168.2.12',
+ 'username': 'user_3',
+ 'password': 'secret_12345',
+ 'console_type': 'type_2',
+ 'public_hostname': 'public_hostname_12345',
+ 'host': '127.0.0.1',
+ 'compute_host': '192.168.1.1',
+ }
+
+ def test_console_pool_create(self):
+ console_pool = db.console_pool_create(
+ self.ctxt, self.test_console_pool_1)
+ self.assertIsNotNone(console_pool.get('id'))
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id']
+ self._assertEqualObjects(
+ console_pool, self.test_console_pool_1, ignored_keys)
+
+ def test_console_pool_create_duplicate(self):
+ db.console_pool_create(self.ctxt, self.test_console_pool_1)
+ self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
+ self.ctxt, self.test_console_pool_1)
+
+ def test_console_pool_get_by_host_type(self):
+ params = [
+ self.test_console_pool_1,
+ self.test_console_pool_2,
+ ]
+
+ for p in params:
+ db.console_pool_create(self.ctxt, p)
+
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id', 'consoles']
+
+ cp = self.test_console_pool_1
+ db_cp = db.console_pool_get_by_host_type(
+ self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
+ )
+ self._assertEqualObjects(cp, db_cp, ignored_keys)
+
+ def test_console_pool_get_by_host_type_no_results(self):
+ self.assertRaises(
+ exception.ConsolePoolNotFoundForHostType,
+ db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
+ 'host', 'console_type')
+
+ def test_console_pool_get_all_by_host_type(self):
+ params = [
+ self.test_console_pool_1,
+ self.test_console_pool_2,
+ self.test_console_pool_3,
+ ]
+ for p in params:
+ db.console_pool_create(self.ctxt, p)
+ ignored_keys = ['deleted', 'created_at', 'updated_at',
+ 'deleted_at', 'id', 'consoles']
+
+ cp = self.test_console_pool_2
+ db_cp = db.console_pool_get_all_by_host_type(
+ self.ctxt, cp['host'], cp['console_type'])
+
+ self._assertEqualListsOfObjects(
+ db_cp, [self.test_console_pool_2, self.test_console_pool_3],
+ ignored_keys)
+
+ def test_console_pool_get_all_by_host_type_no_results(self):
+ res = db.console_pool_get_all_by_host_type(
+ self.ctxt, 'cp_host', 'cp_console_type')
+ self.assertEqual([], res)
+
+
+class DnsdomainTestCase(test.TestCase):
+
+ def setUp(self):
+ super(DnsdomainTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.domain = 'test.domain'
+ self.testzone = 'testzone'
+ self.project = 'fake'
+
+ def test_dnsdomain_register_for_zone(self):
+ db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
+ domain = db.dnsdomain_get(self.ctxt, self.domain)
+ self.assertEqual(domain['domain'], self.domain)
+ self.assertEqual(domain['availability_zone'], self.testzone)
+ self.assertEqual(domain['scope'], 'private')
+
+ def test_dnsdomain_register_for_project(self):
+ db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
+ domain = db.dnsdomain_get(self.ctxt, self.domain)
+ self.assertEqual(domain['domain'], self.domain)
+ self.assertEqual(domain['project_id'], self.project)
+ self.assertEqual(domain['scope'], 'public')
+
+ def test_dnsdomain_list(self):
+ d_list = ['test.domain.one', 'test.domain.two']
+ db.dnsdomain_register_for_zone(self.ctxt, d_list[0], self.testzone)
+ db.dnsdomain_register_for_project(self.ctxt, d_list[1], self.project)
+ db_list = db.dnsdomain_list(self.ctxt)
+ self.assertEqual(sorted(d_list), sorted(db_list))
+
+ def test_dnsdomain_unregister(self):
+ db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
+ db.dnsdomain_unregister(self.ctxt, self.domain)
+ domain = db.dnsdomain_get(self.ctxt, self.domain)
+ self.assertIsNone(domain)
+
+ def test_dnsdomain_get_all(self):
+ d_list = ['test.domain.one', 'test.domain.two']
+ db.dnsdomain_register_for_zone(self.ctxt, d_list[0], 'zone')
+ db.dnsdomain_register_for_zone(self.ctxt, d_list[1], 'zone')
+ db_list = db.dnsdomain_get_all(self.ctxt)
+ db_domain_list = [d.domain for d in db_list]
+ self.assertEqual(sorted(d_list), sorted(db_domain_list))
+
+
+class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+
+ def setUp(self):
+ super(BwUsageTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.useFixture(test.TimeOverride())
+
+ def test_bw_usage_get_by_uuids(self):
+ now = timeutils.utcnow()
+ start_period = now - datetime.timedelta(seconds=10)
+ uuid3_refreshed = now - datetime.timedelta(seconds=5)
+
+ expected_bw_usages = {
+ 'fake_uuid1': {'uuid': 'fake_uuid1',
+ 'mac': 'fake_mac1',
+ 'start_period': start_period,
+ 'bw_in': 100,
+ 'bw_out': 200,
+ 'last_ctr_in': 12345,
+ 'last_ctr_out': 67890,
+ 'last_refreshed': now},
+ 'fake_uuid2': {'uuid': 'fake_uuid2',
+ 'mac': 'fake_mac2',
+ 'start_period': start_period,
+ 'bw_in': 200,
+ 'bw_out': 300,
+ 'last_ctr_in': 22345,
+ 'last_ctr_out': 77890,
+ 'last_refreshed': now},
+ 'fake_uuid3': {'uuid': 'fake_uuid3',
+ 'mac': 'fake_mac3',
+ 'start_period': start_period,
+ 'bw_in': 400,
+ 'bw_out': 500,
+ 'last_ctr_in': 32345,
+ 'last_ctr_out': 87890,
+ 'last_refreshed': uuid3_refreshed}
+ }
+
+ bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
+ ['fake_uuid1', 'fake_uuid2'], start_period)
+ # No matches
+ self.assertEqual(len(bw_usages), 0)
+
+ # Add 3 entries
+ db.bw_usage_update(self.ctxt, 'fake_uuid1',
+ 'fake_mac1', start_period,
+ 100, 200, 12345, 67890)
+ db.bw_usage_update(self.ctxt, 'fake_uuid2',
+ 'fake_mac2', start_period,
+ 100, 200, 42, 42)
+ # Test explicit refreshed time
+ db.bw_usage_update(self.ctxt, 'fake_uuid3',
+ 'fake_mac3', start_period,
+ 400, 500, 32345, 87890,
+ last_refreshed=uuid3_refreshed)
+ # Update 2nd entry
+ db.bw_usage_update(self.ctxt, 'fake_uuid2',
+ 'fake_mac2', start_period,
+ 200, 300, 22345, 77890)
+
+ bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
+ ['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
+ self.assertEqual(len(bw_usages), 3)
+ for usage in bw_usages:
+ self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
+ ignored_keys=self._ignored_keys)
+
+ def test_bw_usage_get(self):
+ now = timeutils.utcnow()
+ start_period = now - datetime.timedelta(seconds=10)
+
+ expected_bw_usage = {'uuid': 'fake_uuid1',
+ 'mac': 'fake_mac1',
+ 'start_period': start_period,
+ 'bw_in': 100,
+ 'bw_out': 200,
+ 'last_ctr_in': 12345,
+ 'last_ctr_out': 67890,
+ 'last_refreshed': now}
+
+ bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
+ 'fake_mac1')
+ self.assertIsNone(bw_usage)
+
+ db.bw_usage_update(self.ctxt, 'fake_uuid1',
+ 'fake_mac1', start_period,
+ 100, 200, 12345, 67890)
+
+ bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
+ 'fake_mac1')
+ self._assertEqualObjects(bw_usage, expected_bw_usage,
+ ignored_keys=self._ignored_keys)
+
+
+class Ec2TestCase(test.TestCase):
+
+ def setUp(self):
+ super(Ec2TestCase, self).setUp()
+ self.ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ def test_ec2_ids_not_found_are_printable(self):
+ def check_exc_format(method, value):
+ try:
+ method(self.ctxt, value)
+ except exception.NotFound as exc:
+ self.assertIn(six.text_type(value), six.text_type(exc))
+
+ check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
+ check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
+
+ def test_ec2_volume_create(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(vol['id'])
+ self.assertEqual(vol['uuid'], 'fake-uuid')
+
+ def test_ec2_volume_get_by_id(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
+ self.assertEqual(vol2['uuid'], vol['uuid'])
+
+ def test_ec2_volume_get_by_uuid(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
+ self.assertEqual(vol2['id'], vol['id'])
+
+ def test_ec2_snapshot_create(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(snap['id'])
+ self.assertEqual(snap['uuid'], 'fake-uuid')
+
+ def test_ec2_snapshot_get_by_ec2_id(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
+ self.assertEqual(snap2['uuid'], 'fake-uuid')
+
+ def test_ec2_snapshot_get_by_uuid(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(snap['id'], snap2['id'])
+
+ def test_ec2_snapshot_get_by_ec2_id_not_found(self):
+ self.assertRaises(exception.SnapshotNotFound,
+ db.ec2_snapshot_get_by_ec2_id,
+ self.ctxt, 123456)
+
+ def test_ec2_snapshot_get_by_uuid_not_found(self):
+ self.assertRaises(exception.SnapshotNotFound,
+ db.ec2_snapshot_get_by_uuid,
+ self.ctxt, 'fake-uuid')
+
+ def test_ec2_instance_create(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(inst['id'])
+ self.assertEqual(inst['uuid'], 'fake-uuid')
+
+ def test_ec2_instance_get_by_uuid(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(inst['id'], inst2['id'])
+
+ def test_ec2_instance_get_by_id(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
+ self.assertEqual(inst['id'], inst2['id'])
+
+ def test_ec2_instance_get_by_uuid_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.ec2_instance_get_by_uuid,
+ self.ctxt, 'uuid-not-present')
+
+ def test_ec2_instance_get_by_id_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.ec2_instance_get_by_id,
+ self.ctxt, 12345)
+
+ def test_get_ec2_instance_id_by_uuid(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst_id = db.get_ec2_instance_id_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(inst['id'], inst_id)
+
+ def test_get_instance_uuid_by_ec2_id(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
+ self.assertEqual(inst_uuid, 'fake-uuid')
+
+ def test_get_ec2_instance_id_by_uuid_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.get_ec2_instance_id_by_uuid,
+ self.ctxt, 'uuid-not-present')
+
+ def test_get_instance_uuid_by_ec2_id_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.get_instance_uuid_by_ec2_id,
+ self.ctxt, 100500)
+
+
+class ArchiveTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ArchiveTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.engine = get_engine()
+ self.conn = self.engine.connect()
+ self.instance_id_mappings = sqlalchemyutils.get_table(
+ self.engine, "instance_id_mappings")
+ self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
+ self.engine, "shadow_instance_id_mappings")
+ self.dns_domains = sqlalchemyutils.get_table(
+ self.engine, "dns_domains")
+ self.shadow_dns_domains = sqlalchemyutils.get_table(
+ self.engine, "shadow_dns_domains")
+ self.consoles = sqlalchemyutils.get_table(self.engine, "consoles")
+ self.console_pools = sqlalchemyutils.get_table(
+ self.engine, "console_pools")
+ self.shadow_consoles = sqlalchemyutils.get_table(
+ self.engine, "shadow_consoles")
+ self.shadow_console_pools = sqlalchemyutils.get_table(
+ self.engine, "shadow_console_pools")
+ self.instances = sqlalchemyutils.get_table(self.engine, "instances")
+ self.shadow_instances = sqlalchemyutils.get_table(
+ self.engine, "shadow_instances")
+ self.uuidstrs = []
+ for unused in range(6):
+ self.uuidstrs.append(stdlib_uuid.uuid4().hex)
+ self.ids = []
+ self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
+ self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
+ "instances"])
+ self.domain_tablenames_to_cleanup = set(["dns_domains"])
+
+ def tearDown(self):
+ super(ArchiveTestCase, self).tearDown()
+ for tablename in self.id_tablenames_to_cleanup:
+ for name in [tablename, "shadow_" + tablename]:
+ table = sqlalchemyutils.get_table(self.engine, name)
+ del_statement = table.delete(table.c.id.in_(self.ids))
+ self.conn.execute(del_statement)
+ for tablename in self.uuid_tablenames_to_cleanup:
+ for name in [tablename, "shadow_" + tablename]:
+ table = sqlalchemyutils.get_table(self.engine, name)
+ del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
+ self.conn.execute(del_statement)
+ for tablename in self.domain_tablenames_to_cleanup:
+ for name in [tablename, "shadow_" + tablename]:
+ table = sqlalchemyutils.get_table(self.engine, name)
+ del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
+ self.conn.execute(del_statement)
+
+ def test_shadow_tables(self):
+ metadata = MetaData(bind=self.engine)
+ metadata.reflect()
+ for table_name in metadata.tables:
+ # NOTE(rpodolyaka): migration 209 introduced a few new tables,
+ # which don't have shadow tables and it's
+ # completely OK, so we should skip them here
+ if table_name.startswith("dump_"):
+ continue
+
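+ # For shadow tables themselves, just check that the corresponding
+ # base table exists.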
+ if table_name.startswith("shadow_"):
+ self.assertIn(table_name[7:], metadata.tables)
+ continue
+ self.assertTrue(db_utils.check_shadow_table(self.engine,
+ table_name))
+
+ def test_archive_deleted_rows(self):
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
+ self.conn.execute(ins_stmt)
+ # Set 4 to deleted
+ update_statement = self.instance_id_mappings.update().\
+ where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=1)
+ self.conn.execute(update_statement)
+ qiim = sql.select([self.instance_id_mappings]).where(self.
+ instance_id_mappings.c.uuid.in_(self.uuidstrs))
+ rows = self.conn.execute(qiim).fetchall()
+ # Verify we have 6 in main
+ self.assertEqual(len(rows), 6)
+ qsiim = sql.select([self.shadow_instance_id_mappings]).\
+ where(self.shadow_instance_id_mappings.c.uuid.in_(
+ self.uuidstrs))
+ rows = self.conn.execute(qsiim).fetchall()
+ # Verify we have 0 in shadow
+ self.assertEqual(len(rows), 0)
+ # Archive 2 rows
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows = self.conn.execute(qiim).fetchall()
+ # Verify we have 4 left in main
+ self.assertEqual(len(rows), 4)
+ rows = self.conn.execute(qsiim).fetchall()
+ # Verify we have 2 in shadow
+ self.assertEqual(len(rows), 2)
+ # Archive 2 more rows
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows = self.conn.execute(qiim).fetchall()
+ # Verify we have 2 left in main
+ self.assertEqual(len(rows), 2)
+ rows = self.conn.execute(qsiim).fetchall()
+ # Verify we have 4 in shadow
+ self.assertEqual(len(rows), 4)
+ # Try to archive more, but there are no deleted rows left.
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows = self.conn.execute(qiim).fetchall()
+ # Verify we still have 2 left in main
+ self.assertEqual(len(rows), 2)
+ rows = self.conn.execute(qsiim).fetchall()
+ # Verify we still have 4 in shadow
+ self.assertEqual(len(rows), 4)
+
+ def test_archive_deleted_rows_for_every_uuid_table(self):
+ tablenames = []
+ for model_class in models.__dict__.itervalues():
+ if hasattr(model_class, "__tablename__"):
+ tablenames.append(model_class.__tablename__)
+ tablenames.sort()
+ for tablename in tablenames:
+ ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
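+ # Only track the tables that were actually populated so tearDown
+ # can clean them up.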
+ if ret == 0:
+ self.uuid_tablenames_to_cleanup.add(tablename)
+
+ def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
+ """:returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
+ main_table = sqlalchemyutils.get_table(self.engine, tablename)
+ if not hasattr(main_table.c, "uuid"):
+ # Not a uuid table, so skip it.
+ return 1
+ shadow_table = sqlalchemyutils.get_table(
+ self.engine, "shadow_" + tablename)
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ ins_stmt = main_table.insert().values(uuid=uuidstr)
+ try:
+ self.conn.execute(ins_stmt)
+ except db_exc.DBError:
+ # This table has constraints that require a table-specific
+ # insert, so skip it.
+ return 2
+ # Set 4 to deleted
+ update_statement = main_table.update().\
+ where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=1)
+ self.conn.execute(update_statement)
+ qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
+ self.uuidstrs))
+ rows = self.conn.execute(qmt).fetchall()
+ # Verify we have 6 in main
+ self.assertEqual(len(rows), 6)
+ qst = sql.select([shadow_table]).\
+ where(shadow_table.c.uuid.in_(self.uuidstrs))
+ rows = self.conn.execute(qst).fetchall()
+ # Verify we have 0 in shadow
+ self.assertEqual(len(rows), 0)
+ # Archive 2 rows
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ # Verify we have 4 left in main
+ rows = self.conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = self.conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Archive 2 more rows
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ # Verify we have 2 left in main
+ rows = self.conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = self.conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Try to archive more, but there are no deleted rows left.
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ # Verify we still have 2 left in main
+ rows = self.conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we still have 4 in shadow
+ rows = self.conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+ return 0
+
+ def test_archive_deleted_rows_no_id_column(self):
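+ # dns_domains has no id column; a deleted row should still be moved
+ # to the shadow table.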
+ uuidstr0 = self.uuidstrs[0]
+ ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
+ self.conn.execute(ins_stmt)
+ update_statement = self.dns_domains.update().\
+ where(self.dns_domains.c.domain == uuidstr0).\
+ values(deleted=True)
+ self.conn.execute(update_statement)
+ qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
+ uuidstr0)
+ rows = self.conn.execute(qdd).fetchall()
+ self.assertEqual(len(rows), 1)
+ qsdd = sql.select([self.shadow_dns_domains],
+ self.shadow_dns_domains.c.domain == uuidstr0)
+ rows = self.conn.execute(qsdd).fetchall()
+ self.assertEqual(len(rows), 0)
+ db.archive_deleted_rows(self.context, max_rows=1)
+ rows = self.conn.execute(qdd).fetchall()
+ self.assertEqual(len(rows), 0)
+ rows = self.conn.execute(qsdd).fetchall()
+ self.assertEqual(len(rows), 1)
+
+ def test_archive_deleted_rows_fk_constraint(self):
+ # consoles.pool_id depends on console_pools.id
+ # SQLite doesn't enforce foreign key constraints without a pragma.
+ dialect = self.engine.url.get_dialect()
+ if dialect == sqlite.dialect:
+ # We're seeing issues with foreign key support in SQLite 3.6.20
+ # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
+ # It works fine in SQLite 3.7.
+ # So return early to skip this test if running SQLite < 3.7
+ import sqlite3
+ tup = sqlite3.sqlite_version_info
+ if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
+ self.skipTest(
+ 'sqlite version too old for reliable SQLA foreign_keys')
+ self.conn.execute("PRAGMA foreign_keys = ON")
+ ins_stmt = self.console_pools.insert().values(deleted=1)
+ result = self.conn.execute(ins_stmt)
+ id1 = result.inserted_primary_key[0]
+ self.ids.append(id1)
+ ins_stmt = self.consoles.insert().values(deleted=1,
+ pool_id=id1)
+ result = self.conn.execute(ins_stmt)
+ id2 = result.inserted_primary_key[0]
+ self.ids.append(id2)
+ # The first try to archive console_pools should fail, due to FK.
+ num = db.archive_deleted_rows_for_table(self.context, "console_pools")
+ self.assertEqual(num, 0)
+ # Then archiving consoles should work.
+ num = db.archive_deleted_rows_for_table(self.context, "consoles")
+ self.assertEqual(num, 1)
+ # Then archiving console_pools should work.
+ num = db.archive_deleted_rows_for_table(self.context, "console_pools")
+ self.assertEqual(num, 1)
+
+ def test_archive_deleted_rows_2_tables(self):
+ # Add 6 rows to each table
+ for uuidstr in self.uuidstrs:
+ ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
+ self.conn.execute(ins_stmt)
+ ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
+ self.conn.execute(ins_stmt2)
+ # Set 4 of each to deleted
+ update_statement = self.instance_id_mappings.update().\
+ where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=1)
+ self.conn.execute(update_statement)
+ update_statement2 = self.instances.update().\
+ where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=1)
+ self.conn.execute(update_statement2)
+ # Verify we have 6 in each main table
+ qiim = sql.select([self.instance_id_mappings]).where(
+ self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
+ rows = self.conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+ qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
+ self.uuidstrs))
+ rows = self.conn.execute(qi).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in each shadow table
+ qsiim = sql.select([self.shadow_instance_id_mappings]).\
+ where(self.shadow_instance_id_mappings.c.uuid.in_(
+ self.uuidstrs))
+ rows = self.conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
+ qsi = sql.select([self.shadow_instances]).\
+ where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
+ rows = self.conn.execute(qsi).fetchall()
+ self.assertEqual(len(rows), 0)
+ # Archive 7 rows, which should be 4 in one table and 3 in the other.
+ db.archive_deleted_rows(self.context, max_rows=7)
+ # Verify we have 5 left in the two main tables combined
+ iim_rows = self.conn.execute(qiim).fetchall()
+ i_rows = self.conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 5)
+ # Verify we have 7 in the two shadow tables combined.
+ siim_rows = self.conn.execute(qsiim).fetchall()
+ si_rows = self.conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 7)
+ # Archive the remaining deleted rows.
+ db.archive_deleted_rows(self.context, max_rows=1)
+ # Verify we have 4 total left in both main tables.
+ iim_rows = self.conn.execute(qiim).fetchall()
+ i_rows = self.conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = self.conn.execute(qsiim).fetchall()
+ si_rows = self.conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+ # Try to archive more, but there are no deleted rows left.
+ db.archive_deleted_rows(self.context, max_rows=500)
+ # Verify we have 4 total left in both main tables.
+ iim_rows = self.conn.execute(qiim).fetchall()
+ i_rows = self.conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = self.conn.execute(qsiim).fetchall()
+ si_rows = self.conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+
+class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(InstanceGroupDBApiTestCase, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def _get_default_values(self):
+ return {'name': 'fake_name',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+
+ def _create_instance_group(self, context, values, policies=None,
+ members=None):
+ return db.instance_group_create(context, values, policies=policies,
+ members=members)
+
+ def test_instance_group_create_no_key(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result, values, ignored_keys)
+ self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
+
+ def test_instance_group_create_with_key(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result, values, ignored_keys)
+
+ def test_instance_group_create_with_same_key(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ self._create_instance_group(self.context, values)
+ self.assertRaises(exception.InstanceGroupIdExists,
+ self._create_instance_group, self.context, values)
+
+ def test_instance_group_get(self):
+ values = self._get_default_values()
+ result1 = self._create_instance_group(self.context, values)
+ result2 = db.instance_group_get(self.context, result1['uuid'])
+ self._assertEqualObjects(result1, result2)
+
+ def test_instance_group_update_simple(self):
+ values = self._get_default_values()
+ result1 = self._create_instance_group(self.context, values)
+ values = {'name': 'new_name', 'user_id': 'new_user',
+ 'project_id': 'new_project'}
+ db.instance_group_update(self.context, result1['uuid'],
+ values)
+ result2 = db.instance_group_get(self.context, result1['uuid'])
+ self.assertEqual(result1['uuid'], result2['uuid'])
+ ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result2, values, ignored_keys)
+
+ def test_instance_group_delete(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ db.instance_group_delete(self.context, result['uuid'])
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_delete, self.context,
+ result['uuid'])
+
+ def test_instance_group_get_nonexistent(self):
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_get,
+ self.context,
+ 'nonexistent')
+
+ def test_instance_group_delete_nonexistent(self):
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_delete,
+ self.context,
+ 'nonexistent')
+
+ def test_instance_group_get_all(self):
+ groups = db.instance_group_get_all(self.context)
+ self.assertEqual(0, len(groups))
+ value = self._get_default_values()
+ result1 = self._create_instance_group(self.context, value)
+ groups = db.instance_group_get_all(self.context)
+ self.assertEqual(1, len(groups))
+ value = self._get_default_values()
+ result2 = self._create_instance_group(self.context, value)
+ groups = db.instance_group_get_all(self.context)
+ results = [result1, result2]
+ self._assertEqualListsOfObjects(results, groups)
+
+ def test_instance_group_get_all_by_project_id(self):
+ groups = db.instance_group_get_all_by_project_id(self.context,
+ 'invalid_project_id')
+ self.assertEqual(0, len(groups))
+ values = self._get_default_values()
+ result1 = self._create_instance_group(self.context, values)
+ groups = db.instance_group_get_all_by_project_id(self.context,
+ 'fake_project')
+ self.assertEqual(1, len(groups))
+ values = self._get_default_values()
+ values['project_id'] = 'new_project_id'
+ result2 = self._create_instance_group(self.context, values)
+ groups = db.instance_group_get_all(self.context)
+ results = [result1, result2]
+ self._assertEqualListsOfObjects(results, groups)
+ projects = [{'name': 'fake_project', 'value': [result1]},
+ {'name': 'new_project_id', 'value': [result2]}]
+ for project in projects:
+ groups = db.instance_group_get_all_by_project_id(self.context,
+ project['name'])
+ self._assertEqualListsOfObjects(project['value'], groups)
+
+ def test_instance_group_update(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result, values, ignored_keys)
+ self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
+ id = result['uuid']
+ values = self._get_default_values()
+ values['name'] = 'new_fake_name'
+ db.instance_group_update(self.context, id, values)
+ result = db.instance_group_get(self.context, id)
+ self.assertEqual(result['name'], 'new_fake_name')
+ # update members
+ values = self._get_default_values()
+ members = ['instance_id1', 'instance_id2']
+ values['members'] = members
+ db.instance_group_update(self.context, id, values)
+ result = db.instance_group_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
+ # update policies
+ values = self._get_default_values()
+ policies = ['policy1', 'policy2']
+ values['policies'] = policies
+ db.instance_group_update(self.context, id, values)
+ result = db.instance_group_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
+ # test invalid ID
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_update, self.context,
+ 'invalid_id', values)
+
+ def test_instance_group_get_by_instance(self):
+ values = self._get_default_values()
+ group1 = self._create_instance_group(self.context, values)
+
+ members = ['instance_id1', 'instance_id2']
+ db.instance_group_members_add(self.context, group1.uuid, members)
+
+ group2 = db.instance_group_get_by_instance(self.context,
+ 'instance_id1')
+
+ self.assertEqual(group2.uuid, group1.uuid)
+
+
+class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
+ def test_instance_group_members_on_create(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ members = ['instance_id1', 'instance_id2']
+ result = self._create_instance_group(self.context, values,
+ members=members)
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result, values, ignored_keys)
+ self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
+
+ def test_instance_group_members_add(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ members = db.instance_group_members_get(self.context, id)
+ self.assertEqual(members, [])
+ members2 = ['instance_id1', 'instance_id2']
+ db.instance_group_members_add(self.context, id, members2)
+ members = db.instance_group_members_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(members, members2)
+
+ def test_instance_group_members_update(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ members2 = ['instance_id1', 'instance_id2']
+ db.instance_group_members_add(self.context, id, members2)
+ members = db.instance_group_members_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(members, members2)
+ # check add with existing keys
+ members3 = ['instance_id1', 'instance_id2', 'instance_id3']
+ db.instance_group_members_add(self.context, id, members3)
+ members = db.instance_group_members_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(members, members3)
+
+ def test_instance_group_members_delete(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ members3 = ['instance_id1', 'instance_id2', 'instance_id3']
+ db.instance_group_members_add(self.context, id, members3)
+ members = db.instance_group_members_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(members, members3)
+ for instance_id in members3[:]:
+ db.instance_group_member_delete(self.context, id, instance_id)
+ members3.remove(instance_id)
+ members = db.instance_group_members_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(members, members3)
+
+ def test_instance_group_members_invalid_ids(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_members_get,
+ self.context, 'invalid')
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_member_delete, self.context,
+ 'invalidid', 'instance_id1')
+ members = ['instance_id1', 'instance_id2']
+ db.instance_group_members_add(self.context, id, members)
+ self.assertRaises(exception.InstanceGroupMemberNotFound,
+ db.instance_group_member_delete,
+ self.context, id, 'invalid_id')
+
+
+class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
+ def test_instance_group_policies_on_create(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ policies = ['policy1', 'policy2']
+ result = self._create_instance_group(self.context, values,
+ policies=policies)
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+ self._assertEqualObjects(result, values, ignored_keys)
+ self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
+
+ def test_instance_group_policies_add(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ policies = db.instance_group_policies_get(self.context, id)
+ self.assertEqual(policies, [])
+ policies2 = ['policy1', 'policy2']
+ db.instance_group_policies_add(self.context, id, policies2)
+ policies = db.instance_group_policies_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
+
+ def test_instance_group_policies_update(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ policies2 = ['policy1', 'policy2']
+ db.instance_group_policies_add(self.context, id, policies2)
+ policies = db.instance_group_policies_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
+ policies3 = ['policy1', 'policy2', 'policy3']
+ db.instance_group_policies_add(self.context, id, policies3)
+ policies = db.instance_group_policies_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+
+ def test_instance_group_policies_delete(self):
+ values = self._get_default_values()
+ values['uuid'] = 'fake_id'
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ policies3 = ['policy1', 'policy2', 'policy3']
+ db.instance_group_policies_add(self.context, id, policies3)
+ policies = db.instance_group_policies_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+ for policy in policies3[:]:
+ db.instance_group_policy_delete(self.context, id, policy)
+ policies3.remove(policy)
+ policies = db.instance_group_policies_get(self.context, id)
+ self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+
+ def test_instance_group_policies_invalid_ids(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ id = result['uuid']
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_policies_get,
+ self.context, 'invalid')
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_policy_delete, self.context,
+ 'invalidid', 'policy1')
+ policies = ['policy1', 'policy2']
+ db.instance_group_policies_add(self.context, id, policies)
+ self.assertRaises(exception.InstanceGroupPolicyNotFound,
+ db.instance_group_policy_delete,
+ self.context, id, 'invalid_policy')
+
+
+class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(PciDeviceDBApiTestCase, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.admin_context = context.get_admin_context()
+ self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+
+ def _get_fake_pci_devs(self):
+ return {'id': 3353,
+ 'compute_node_id': 1,
+ 'address': '0000:0f:08.7',
+ 'vendor_id': '8086',
+ 'product_id': '1520',
+ 'dev_type': 'type-VF',
+ 'dev_id': 'pci_0000:0f:08.7',
+ 'extra_info': None,
+ 'label': 'label_8086_1520',
+ 'status': 'available',
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'request_id': None,
+ }, {'id': 3356,
+ 'compute_node_id': 1,
+ 'address': '0000:0f:03.7',
+ 'vendor_id': '8083',
+ 'product_id': '1523',
+ 'dev_type': 'type-VF',
+ 'dev_id': 'pci_0000:0f:08.7',
+ 'extra_info': None,
+ 'label': 'label_8086_1520',
+ 'status': 'available',
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'request_id': None,
+ }
+
+ def _create_fake_pci_devs(self):
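+        # pci_device_update() acts as an upsert here: it is expected to
+        # create the fake device rows when they do not exist yet, so no
+        # separate create call is needed.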
+ v1, v2 = self._get_fake_pci_devs()
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ return (v1, v2)
+
+ def test_pci_device_get_by_addr(self):
+ v1, v2 = self._create_fake_pci_devs()
+ result = db.pci_device_get_by_addr(self.admin_context, 1,
+ '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_get_by_addr_not_found(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.PciDeviceNotFound,
+ db.pci_device_get_by_addr, self.admin_context,
+ 1, '0000:0f:08:09')
+
+ def test_pci_device_get_by_addr_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_by_addr,
+ self.context, 1, '0000:0f:08.7')
+
+ def test_pci_device_get_by_id(self):
+ v1, v2 = self._create_fake_pci_devs()
+ result = db.pci_device_get_by_id(self.admin_context, 3353)
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_get_by_id_not_found(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.PciDeviceNotFoundById,
+ db.pci_device_get_by_id,
+ self.admin_context, 3354)
+
+ def test_pci_device_get_by_id_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_by_id,
+ self.context, 3553)
+
+ def test_pci_device_get_all_by_node(self):
+ v1, v2 = self._create_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+
+ def test_pci_device_get_all_by_node_empty(self):
+ v1, v2 = self._get_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 9)
+ self.assertEqual(len(results), 0)
+
+ def test_pci_device_get_all_by_node_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_all_by_node,
+ self.context, 1)
+
+ def test_pci_device_get_by_instance_uuid(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ v2['status'] = 'allocated'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ results = db.pci_device_get_all_by_instance_uuid(
+ self.context,
+ '00000000-0000-0000-0000-000000000010')
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+
+ def test_pci_device_get_by_instance_uuid_check_status(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ v2['status'] = 'claimed'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ results = db.pci_device_get_all_by_instance_uuid(
+ self.context,
+ '00000000-0000-0000-0000-000000000010')
+ self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
+
+ def test_pci_device_update(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ result = db.pci_device_get_by_addr(
+ self.admin_context, 1, '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ v1['status'] = 'claimed'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ result = db.pci_device_get_by_addr(
+ self.admin_context, 1, '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_update_low_priv(self):
+ v1, v2 = self._get_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_update, self.context,
+ v1['compute_node_id'], v1['address'], v1)
+
+ def test_pci_device_destroy(self):
+ v1, v2 = self._create_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+ db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
+ v1['address'])
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
+
+ def test_pci_device_destroy_exception(self):
+ v1, v2 = self._get_fake_pci_devs()
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
+ self.assertRaises(exception.PciDeviceNotFound,
+ db.pci_device_destroy,
+ self.admin_context,
+ v2['compute_node_id'],
+ v2['address'])
+
+
+class RetryOnDeadlockTestCase(test.TestCase):
+ def test_without_deadlock(self):
+ @sqlalchemy_api._retry_on_deadlock
+ def call_api(*args, **kwargs):
+ return True
+ self.assertTrue(call_api())
+
+ def test_raise_deadlock(self):
+ self.attempts = 2
+
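+        # _retry_on_deadlock should transparently re-invoke the wrapped
+        # function on DBDeadlock, so after two simulated deadlocks the call
+        # below still returns True.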
+ @sqlalchemy_api._retry_on_deadlock
+ def call_api(*args, **kwargs):
+ while self.attempts:
+ self.attempts = self.attempts - 1
+ raise db_exc.DBDeadlock("fake exception")
+ return True
+ self.assertTrue(call_api())
+
+
+class TestSqlalchemyTypesRepr(test_base.DbTestCase):
+ def setUp(self):
+ super(TestSqlalchemyTypesRepr, self).setUp()
+ meta = MetaData(bind=self.engine)
+ self.table = Table(
+ 'cidr_tbl',
+ meta,
+ Column('id', Integer, primary_key=True),
+ Column('addr', col_types.CIDR())
+ )
+ self.table.create()
+ self.addCleanup(meta.drop_all)
+
+ def test_cidr_repr(self):
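+        # Each pair is (value inserted, canonical text expected back from the
+        # CIDR column type); bare addresses should come back with a
+        # full-length prefix, and None should round-trip unchanged.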
+ addrs = [('192.168.3.0/24', '192.168.3.0/24'),
+ ('2001:db8::/64', '2001:db8::/64'),
+ ('192.168.3.0', '192.168.3.0/32'),
+ ('2001:db8::', '2001:db8::/128'),
+ (None, None)]
+ with self.engine.begin() as conn:
+ for i in addrs:
+ conn.execute(self.table.insert(), {'addr': i[0]})
+
+ query = self.table.select().order_by(self.table.c.id)
+ result = conn.execute(query)
+ for idx, row in enumerate(result):
+ self.assertEqual(addrs[idx][1], row.addr)
+
+
+class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
+ test_base.MySQLOpportunisticTestCase):
+ pass
+
+
+class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
+ test_base.PostgreSQLOpportunisticTestCase):
+ pass
diff --git a/nova/tests/unit/db/test_migration_utils.py b/nova/tests/unit/db/test_migration_utils.py
new file mode 100644
index 0000000000..1d5d155894
--- /dev/null
+++ b/nova/tests/unit/db/test_migration_utils.py
@@ -0,0 +1,256 @@
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.db.sqlalchemy import utils as oslodbutils
+import sqlalchemy
+from sqlalchemy import Integer, String
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy.exc import NoSuchTableError
+from sqlalchemy import sql
+from sqlalchemy.types import UserDefinedType
+
+from nova.db.sqlalchemy import api as db
+from nova.db.sqlalchemy import utils
+from nova import exception
+from nova.tests.unit.db import test_migrations
+
+
+SA_VERSION = tuple(map(int, sqlalchemy.__version__.split('.')))
+
+
+class CustomType(UserDefinedType):
+ """Dummy column type for testing unsupported types."""
+ def get_col_spec(self):
+ return "CustomType"
+
+
+class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
+ """Class for testing utils that are used in db migrations."""
+
+ def test_delete_from_select(self):
+ table_name = "__test_deletefromselect_table__"
+ uuidstrs = []
+ for unused in range(10):
+ uuidstrs.append(uuid.uuid4().hex)
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ conn = engine.connect()
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False, autoincrement=True),
+ Column('uuid', String(36), nullable=False))
+ test_table.create()
+ # Add 10 rows to table
+ for uuidstr in uuidstrs:
+ ins_stmt = test_table.insert().values(uuid=uuidstr)
+ conn.execute(ins_stmt)
+
+ # Delete 4 rows in one chunk
+ column = test_table.c.id
+ query_delete = sql.select([column],
+ test_table.c.id < 5).order_by(column)
+ delete_statement = utils.DeleteFromSelect(test_table,
+ query_delete, column)
+ result_delete = conn.execute(delete_statement)
+ # Verify we delete 4 rows
+ self.assertEqual(result_delete.rowcount, 4)
+
+ query_all = sql.select([test_table]).\
+ where(test_table.c.uuid.in_(uuidstrs))
+ rows = conn.execute(query_all).fetchall()
+ # Verify we still have 6 rows in table
+ self.assertEqual(len(rows), 6)
+
+ test_table.drop()
+
+ def test_check_shadow_table(self):
+ table_name = 'test_check_shadow_table'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', String(256)))
+ table.create()
+
+ # check missing shadow table
+ self.assertRaises(NoSuchTableError,
+ utils.check_shadow_table, engine, table_name)
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer),
+ Column('a', Integer))
+ shadow_table.create()
+
+ # check missing column
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ # check when all is ok
+ c = Column('c', String(256))
+ shadow_table.create_column(c)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+
+ # check extra column
+ d = Column('d', Integer)
+ shadow_table.create_column(d)
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ table.drop()
+ shadow_table.drop()
+
+ def test_check_shadow_table_different_types(self):
+ table_name = 'test_check_shadow_table_different_types'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', String(256)))
+ shadow_table.create()
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ table.drop()
+ shadow_table.drop()
+
+ def test_check_shadow_table_with_unsupported_sqlite_type(self):
+ if 'sqlite' not in self.engines:
+ self.skipTest('sqlite is not configured')
+ table_name = 'test_check_shadow_table_with_unsupported_sqlite_type'
+ engine = self.engines['sqlite']
+ meta = MetaData(bind=engine)
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', CustomType))
+ table.create()
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', CustomType))
+ shadow_table.create()
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ shadow_table.drop()
+
+ def test_create_shadow_table_by_table_instance(self):
+ table_name = 'test_create_shadow_table_by_table_instance'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine, table=table)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_table_by_name(self):
+ table_name = 'test_create_shadow_table_by_name'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_table_not_supported_type(self):
+ if 'sqlite' in self.engines:
+ table_name = 'test_create_shadow_table_not_supported_type'
+ engine = self.engines['sqlite']
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', CustomType))
+ table.create()
+
+ # reflection of custom types has been fixed upstream
+ if SA_VERSION < (0, 9, 0):
+ self.assertRaises(oslodbutils.ColumnError,
+ utils.create_shadow_table,
+ engine, table_name=table_name)
+
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name,
+ a=Column('a', CustomType())
+ )
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_both_table_and_table_name_are_none(self):
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table, engine)
+
+ def test_create_shadow_both_table_and_table_name_are_specified(self):
+ table_name = ('test_create_shadow_both_table_and_table_name_are_'
+ 'specified')
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table,
+ engine, table=table, table_name=table_name)
+ table.drop()
+
+ def test_create_duplicate_shadow_table(self):
+ table_name = 'test_create_duplicate_shadow_table'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name)
+ self.assertRaises(exception.ShadowTableExists,
+ utils.create_shadow_table,
+ engine, table_name=table_name)
+ table.drop()
+ shadow_table.drop()
diff --git a/nova/tests/unit/db/test_migrations.conf b/nova/tests/unit/db/test_migrations.conf
new file mode 100644
index 0000000000..310b7055c4
--- /dev/null
+++ b/nova/tests/unit/db/test_migrations.conf
@@ -0,0 +1,26 @@
+[unit_tests]
+# Set up any number of databases to test concurrently.
+# The "name" used in the test is the config variable key.
+
+# A few tests rely on one sqlite database with 'sqlite' as the key.
+
+sqlite=sqlite://
+#sqlitefile=sqlite:///test_migrations_utils.db
+#mysql=mysql+mysqldb://user:pass@localhost/test_migrations_utils
+#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils
+
+[migration_dbs]
+# Migration DB details are listed separately as they can't be accessed
+# concurrently. These databases can't be the same as the ones above.
+
+# Note: sqlite:// is in-memory and unique each time it is spawned.
+# However, file-based sqlite databases are not unique.
+
+sqlite=sqlite://
+#sqlitefile=sqlite:///test_migrations.db
+#mysql=mysql+mysqldb://user:pass@localhost/test_migrations
+#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations
+
+[walk_style]
+snake_walk=yes
+downgrade=yes
diff --git a/nova/tests/unit/db/test_migrations.py b/nova/tests/unit/db/test_migrations.py
new file mode 100644
index 0000000000..cd1c3a7fdf
--- /dev/null
+++ b/nova/tests/unit/db/test_migrations.py
@@ -0,0 +1,913 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2012-2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for database migrations. This test case reads the configuration
+file test_migrations.conf for database connection settings
+to use in the tests. For each connection found in the config file,
+a series of tests is run to ensure that migrations work
+properly both upgrading and downgrading, and that no data loss occurs
+if possible.
+
+There are also "opportunistic" tests for both mysql and postgresql in here,
+which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
+a properly configured unit test environment.
+
+For the opportunistic testing you need to set up databases named
+'openstack_citest' with user 'openstack_citest' and password
+'openstack_citest' on localhost. The tests will then use that database and
+credential combination to run.
+
+For postgres on Ubuntu this can be done with the following commands::
+
+| sudo -u postgres psql
+| postgres=# create user openstack_citest with createdb login password
+| 'openstack_citest';
+| postgres=# create database openstack_citest with owner openstack_citest;
+
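+For MySQL a similar setup can be used (a sketch; adjust the user, password
+and privileges to your environment)::
+
+| mysql -u root
+| mysql> CREATE DATABASE openstack_citest;
+| mysql> CREATE USER 'openstack_citest'@'localhost'
+|        IDENTIFIED BY 'openstack_citest';
+| mysql> GRANT ALL PRIVILEGES ON openstack_citest.*
+|        TO 'openstack_citest'@'localhost';
+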
+"""
+
+import ConfigParser
+import glob
+import os
+
+from migrate.versioning import repository
+from oslo.concurrency import processutils
+from oslo.db.sqlalchemy import session
+from oslo.db.sqlalchemy import utils as oslodbutils
+import six.moves.urllib.parse as urlparse
+import sqlalchemy
+import sqlalchemy.exc
+
+import nova.db.sqlalchemy.migrate_repo
+from nova.db.sqlalchemy import utils as db_utils
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova import test
+from nova import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _have_mysql(user, passwd, database):
+ present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
+ if present is None:
+ return oslodbutils.is_backend_avail('mysql+mysqldb', database,
+ user, passwd)
+ return present.lower() in ('', 'true')
+
+
+def _have_postgresql(user, passwd, database):
+ present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT')
+ if present is None:
+ return oslodbutils.is_backend_avail('postgresql+psycopg2', database,
+ user, passwd)
+ return present.lower() in ('', 'true')
+
+
+def get_mysql_connection_info(conn_pieces):
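+    # Note: unlike get_pgsql_connection_info(), the password returned here is
+    # already formatted as the mysql CLI "-p" argument (or an empty string).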
+ database = conn_pieces.path.strip('/')
+ loc_pieces = conn_pieces.netloc.split('@')
+ host = loc_pieces[1]
+ auth_pieces = loc_pieces[0].split(':')
+ user = auth_pieces[0]
+ password = ""
+ if len(auth_pieces) > 1:
+ if auth_pieces[1].strip():
+ password = "-p\"%s\"" % auth_pieces[1]
+
+ return (user, password, database, host)
+
+
+def get_pgsql_connection_info(conn_pieces):
+ database = conn_pieces.path.strip('/')
+ loc_pieces = conn_pieces.netloc.split('@')
+ host = loc_pieces[1]
+
+ auth_pieces = loc_pieces[0].split(':')
+ user = auth_pieces[0]
+ password = ""
+ if len(auth_pieces) > 1:
+ password = auth_pieces[1].strip()
+
+ return (user, password, database, host)
+
+
+class CommonTestsMixIn(object):
+ """Base class for migration tests.
+
+ BaseMigrationTestCase is effectively an abstract class, meant to be derived
+ from and not directly tested against; that's why these `test_` methods need
+ to be on a Mixin, so that they won't be picked up as valid tests for
+ BaseMigrationTestCase.
+ """
+ def test_walk_versions(self):
+ if not self.engines:
+ self.skipTest("No engines initialized")
+
+ for key, engine in self.engines.items():
+ # We start each walk with a completely blank slate.
+ self._reset_database(key)
+ self._walk_versions(engine, self.snake_walk, self.downgrade)
+
+ def test_mysql_opportunistically(self):
+ self._test_mysql_opportunistically()
+
+ def test_mysql_connect_fail(self):
+ """Test that we can trigger a mysql connection failure and we fail
+ gracefully to ensure we don't break people without mysql
+ """
+ if oslodbutils.is_backend_avail('mysql+mysqldb', self.DATABASE,
+ "openstack_cifail", self.PASSWD):
+ self.fail("Shouldn't have connected")
+
+ def test_postgresql_opportunistically(self):
+ self._test_postgresql_opportunistically()
+
+ def test_postgresql_connect_fail(self):
+ """Test that we can trigger a postgres connection failure and we fail
+ gracefully to ensure we don't break people without postgres
+ """
+ if oslodbutils.is_backend_avail('postgresql+psycopg2', self.DATABASE,
+ "openstack_cifail", self.PASSWD):
+ self.fail("Shouldn't have connected")
+
+
+class BaseMigrationTestCase(test.NoDBTestCase):
+ """Base class for testing migrations and migration utils. This sets up
+ and configures the databases to run tests against.
+ """
+
+ REQUIRES_LOCKING = True
+
+ # NOTE(jhesketh): It is expected that tests clean up after themselves.
+ # This is necessary for concurrency to allow multiple tests to work on
+ # one database.
+    # The full migration walk tests, however, do call the old
+    # _reset_databases() to throw away whatever was there, so they need to
+    # operate on their own database that we know isn't accessed concurrently.
+ # Hence, BaseWalkMigrationTestCase overwrites the engine list.
+
+ USER = None
+ PASSWD = None
+ DATABASE = None
+
+ TIMEOUT_SCALING_FACTOR = 2
+
+ def __init__(self, *args, **kwargs):
+ super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
+
+ self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+ 'test_migrations.conf')
+ # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+ # to override the location of the config file for migration testing
+ self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+ self.DEFAULT_CONFIG_FILE)
+ self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+ self.REPOSITORY = repository.Repository(
+ os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+ self.INIT_VERSION = 0
+
+ self.snake_walk = False
+ self.downgrade = False
+ self.test_databases = {}
+ self.migration = None
+ self.migration_api = None
+
+ def setUp(self):
+ super(BaseMigrationTestCase, self).setUp()
+ self._load_config()
+
+ def _load_config(self):
+ # Load test databases from the config file. Only do this
+ # once. No need to re-run this on each test...
+ LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+ if os.path.exists(self.CONFIG_FILE_PATH):
+ cp = ConfigParser.RawConfigParser()
+ try:
+ cp.read(self.CONFIG_FILE_PATH)
+ config = cp.options('unit_tests')
+ for key in config:
+ self.test_databases[key] = cp.get('unit_tests', key)
+ self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
+ self.downgrade = cp.getboolean('walk_style', 'downgrade')
+
+ except ConfigParser.ParsingError as e:
+ self.fail("Failed to read test_migrations.conf config "
+ "file. Got error: %s" % e)
+ else:
+ self.fail("Failed to find test_migrations.conf config "
+ "file.")
+
+ self.engines = {}
+ for key, value in self.test_databases.items():
+ self.engines[key] = session.create_engine(value)
+
+ # NOTE(jhesketh): We only need to make sure the databases are created
+ # not necessarily clean of tables.
+ self._create_databases()
+
+ def execute_cmd(self, cmd=None):
+ out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
+ output = out or err
+ LOG.debug(output)
+ self.assertEqual('', err,
+ "Failed to run: %s\n%s" % (cmd, output))
+
+ @utils.synchronized('pgadmin', external=True)
+ def _reset_pg(self, conn_pieces):
+ (user, password, database, host) = \
+ get_pgsql_connection_info(conn_pieces)
+ os.environ['PGPASSWORD'] = password
+ os.environ['PGUSER'] = user
+        # note(boris-42): We must create and drop the database; since we
+        # can't drop the database we are connected to, such operations are
+        # run against the special 'postgres' database.
+ sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
+ " '%(sql)s' -d postgres")
+ sqldict = {'user': user, 'host': host}
+
+ sqldict['sql'] = ("drop database if exists %s;") % database
+ droptable = sqlcmd % sqldict
+ self.execute_cmd(droptable)
+
+ sqldict['sql'] = ("create database %s;") % database
+ createtable = sqlcmd % sqldict
+ self.execute_cmd(createtable)
+
+ os.unsetenv('PGPASSWORD')
+ os.unsetenv('PGUSER')
+
+ @utils.synchronized('mysql', external=True)
+ def _reset_mysql(self, conn_pieces):
+ # We can execute the MySQL client to destroy and re-create
+        # the MySQL database, which is easier and less error-prone
+ # than using SQLAlchemy to do this via MetaData...trust me.
+ (user, password, database, host) = \
+ get_mysql_connection_info(conn_pieces)
+ sql = ("drop database if exists %(database)s; "
+ "create database %(database)s;" % {'database': database})
+ cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
+ "-e \"%(sql)s\"" % {'user': user, 'password': password,
+ 'host': host, 'sql': sql})
+ self.execute_cmd(cmd)
+
+ @utils.synchronized('sqlite', external=True)
+ def _reset_sqlite(self, conn_pieces):
+ # We can just delete the SQLite database, which is
+ # the easiest and cleanest solution
+ db_path = conn_pieces.path.strip('/')
+ if os.path.exists(db_path):
+ os.unlink(db_path)
+ # No need to recreate the SQLite DB. SQLite will
+ # create it for us if it's not there...
+
+ def _create_databases(self):
+ """Create all configured databases as needed."""
+ for key, engine in self.engines.items():
+ self._create_database(key)
+
+ def _create_database(self, key):
+ """Create database if it doesn't exist."""
+ conn_string = self.test_databases[key]
+ conn_pieces = urlparse.urlparse(conn_string)
+
+ if conn_string.startswith('mysql'):
+ (user, password, database, host) = \
+ get_mysql_connection_info(conn_pieces)
+ sql = "create database if not exists %s;" % database
+ cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
+ "-e \"%(sql)s\"" % {'user': user, 'password': password,
+ 'host': host, 'sql': sql})
+ self.execute_cmd(cmd)
+ elif conn_string.startswith('postgresql'):
+ (user, password, database, host) = \
+ get_pgsql_connection_info(conn_pieces)
+ os.environ['PGPASSWORD'] = password
+ os.environ['PGUSER'] = user
+
+ sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
+ " '%(sql)s' -d postgres")
+
+ sql = ("create database if not exists %s;") % database
+ createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
+            # 0 means the database was created
+            # 256 means it already exists (which is fine)
+            # anything else is treated as an error
+ out, err = processutils.trycmd(createtable, shell=True,
+ check_exit_code=[0, 256],
+ discard_warnings=True)
+ output = out or err
+ if err != '':
+ self.fail("Failed to run: %s\n%s" % (createtable, output))
+
+ os.unsetenv('PGPASSWORD')
+ os.unsetenv('PGUSER')
+
+ def _reset_databases(self):
+ """Reset all configured databases."""
+ for key, engine in self.engines.items():
+ self._reset_database(key)
+
+ def _reset_database(self, key):
+ """Reset specific database."""
+ engine = self.engines[key]
+ conn_string = self.test_databases[key]
+ conn_pieces = urlparse.urlparse(conn_string)
+ engine.dispose()
+ if conn_string.startswith('sqlite'):
+ self._reset_sqlite(conn_pieces)
+ elif conn_string.startswith('mysql'):
+ self._reset_mysql(conn_pieces)
+ elif conn_string.startswith('postgresql'):
+ self._reset_pg(conn_pieces)
+
+
+class BaseWalkMigrationTestCase(BaseMigrationTestCase):
+ """BaseWalkMigrationTestCase loads in an alternative set of databases for
+ testing against. This is necessary as the default databases can run tests
+ concurrently without interfering with itself. It is expected that
+ databases listed under [migraiton_dbs] in the configuration are only being
+ accessed by one test at a time. Currently only test_walk_versions accesses
+ the databases (and is the only method that calls _reset_database() which
+ is clearly problematic for concurrency).
+ """
+
+ def _load_config(self):
+ # Load test databases from the config file. Only do this
+ # once. No need to re-run this on each test...
+ LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+ if os.path.exists(self.CONFIG_FILE_PATH):
+ cp = ConfigParser.RawConfigParser()
+ try:
+ cp.read(self.CONFIG_FILE_PATH)
+ config = cp.options('migration_dbs')
+ for key in config:
+ self.test_databases[key] = cp.get('migration_dbs', key)
+ self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
+ self.downgrade = cp.getboolean('walk_style', 'downgrade')
+ except ConfigParser.ParsingError as e:
+ self.fail("Failed to read test_migrations.conf config "
+ "file. Got error: %s" % e)
+ else:
+ self.fail("Failed to find test_migrations.conf config "
+ "file.")
+
+ self.engines = {}
+ for key, value in self.test_databases.items():
+ self.engines[key] = session.create_engine(value)
+
+ self._create_databases()
+
+ def _test_mysql_opportunistically(self):
+ # Test that table creation on mysql only builds InnoDB tables
+ if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
+ self.skipTest("mysql not available")
+        # Add this to the global lists so reset works with it; it's removed
+        # automatically in tearDown, so there is no need to clean it up here.
+ connect_string = oslodbutils.get_connect_string(
+ "mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD)
+ (user, password, database, host) = \
+ get_mysql_connection_info(urlparse.urlparse(connect_string))
+ engine = session.create_engine(connect_string)
+ self.engines[database] = engine
+ self.test_databases[database] = connect_string
+
+ # build a fully populated mysql database with all the tables
+ self._reset_database(database)
+ self._walk_versions(engine, self.snake_walk, self.downgrade)
+
+ connection = engine.connect()
+ # sanity check
+ total = connection.execute("SELECT count(*) "
+ "from information_schema.TABLES "
+ "where TABLE_SCHEMA='%(database)s'" %
+ {'database': database})
+ self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
+
+ noninnodb = connection.execute("SELECT count(*) "
+ "from information_schema.TABLES "
+ "where TABLE_SCHEMA='%(database)s' "
+ "and ENGINE!='InnoDB' "
+ "and TABLE_NAME!='migrate_version'" %
+ {'database': database})
+ count = noninnodb.scalar()
+ self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
+ connection.close()
+
+ del(self.engines[database])
+ del(self.test_databases[database])
+
+ def _test_postgresql_opportunistically(self):
+ # Test postgresql database migration walk
+ if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
+ self.skipTest("postgresql not available")
+        # Add this to the global lists so reset works with it; it's removed
+        # automatically in tearDown, so there is no need to clean it up here.
+ connect_string = oslodbutils.get_connect_string(
+ "postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD)
+ engine = session.create_engine(connect_string)
+ (user, password, database, host) = \
+ get_pgsql_connection_info(urlparse.urlparse(connect_string))
+ self.engines[database] = engine
+ self.test_databases[database] = connect_string
+
+ # build a fully populated postgresql database with all the tables
+ self._reset_database(database)
+ self._walk_versions(engine, self.snake_walk, self.downgrade)
+ del(self.engines[database])
+ del(self.test_databases[database])
+
+ def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
+ # Determine latest version script from the repo, then
+ # upgrade from 1 through to the latest, with no data
+ # in the databases. This just checks that the schema itself
+ # upgrades successfully.
+
+ # Place the database under version control
+ self.migration_api.version_control(engine,
+ self.REPOSITORY,
+ self.INIT_VERSION)
+ self.assertEqual(self.INIT_VERSION,
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
+
+ LOG.debug('latest version is %s' % self.REPOSITORY.latest)
+ versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
+
+ for version in versions:
+ # upgrade -> downgrade -> upgrade
+ self._migrate_up(engine, version, with_data=True)
+ if snake_walk:
+ downgraded = self._migrate_down(
+ engine, version - 1, with_data=True)
+ if downgraded:
+ self._migrate_up(engine, version)
+
+ if downgrade:
+ # Now walk it back down to 0 from the latest, testing
+ # the downgrade paths.
+ for version in reversed(versions):
+ # downgrade -> upgrade -> downgrade
+ downgraded = self._migrate_down(engine, version - 1)
+
+ if snake_walk and downgraded:
+ self._migrate_up(engine, version)
+ self._migrate_down(engine, version - 1)
+
+ def _migrate_down(self, engine, version, with_data=False):
+ try:
+ self.migration_api.downgrade(engine, self.REPOSITORY, version)
+ except NotImplementedError:
+ # NOTE(sirp): some migrations, namely release-level
+ # migrations, don't support a downgrade.
+ return False
+
+ self.assertEqual(version,
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
+
+ # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
+ # version). So if we have any downgrade checks, they need to be run for
+ # the previous (higher numbered) migration.
+ if with_data:
+ post_downgrade = getattr(
+ self, "_post_downgrade_%03d" % (version + 1), None)
+ if post_downgrade:
+ post_downgrade(engine)
+
+ return True
+
+ def _skippable_migrations(self):
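+        # Placeholder migrations are empty slots reserved for backports to
+        # stable branches; they make no schema changes and therefore have no
+        # dedicated _check_### test.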
+ special = [
+ 216, # Havana
+ ]
+
+ havana_placeholders = range(217, 227)
+ icehouse_placeholders = range(235, 244)
+ juno_placeholders = range(255, 265)
+
+ return (special +
+ havana_placeholders +
+ icehouse_placeholders +
+ juno_placeholders)
+
+ def _migrate_up(self, engine, version, with_data=False):
+ """migrate up to a new version of the db.
+
+ We allow for data insertion and post checks at every
+ migration version with special _pre_upgrade_### and
+ _check_### functions in the main test.
+ """
+ # NOTE(sdague): try block is here because it's impossible to debug
+ # where a failed data migration happens otherwise
+ try:
+ if with_data:
+ data = None
+ pre_upgrade = getattr(
+ self, "_pre_upgrade_%03d" % version, None)
+ if pre_upgrade:
+ data = pre_upgrade(engine)
+
+ self.migration_api.upgrade(engine, self.REPOSITORY, version)
+ self.assertEqual(version,
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
+ if with_data:
+ check = getattr(self, "_check_%03d" % version, None)
+ if version not in self._skippable_migrations():
+ self.assertIsNotNone(check,
+ ('DB Migration %i does not have a '
+ 'test. Please add one!') % version)
+ if check:
+ check(engine, data)
+ except Exception:
+ LOG.error("Failed to migrate to version %s on engine %s" %
+ (version, engine))
+ raise
+
+
+class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
+ """Test sqlalchemy-migrate migrations."""
+ USER = "openstack_citest"
+ PASSWD = "openstack_citest"
+ DATABASE = "openstack_citest"
+
+ def __init__(self, *args, **kwargs):
+ super(TestNovaMigrations, self).__init__(*args, **kwargs)
+
+ self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+ 'test_migrations.conf')
+ # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+ # to override the location of the config file for migration testing
+ self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+ self.DEFAULT_CONFIG_FILE)
+ self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+ self.REPOSITORY = repository.Repository(
+ os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+
+ def setUp(self):
+ super(TestNovaMigrations, self).setUp()
+
+ if self.migration is None:
+ self.migration = __import__('nova.db.migration',
+ globals(), locals(), ['db_initial_version'], -1)
+ self.INIT_VERSION = self.migration.db_initial_version()
+ if self.migration_api is None:
+ temp = __import__('nova.db.sqlalchemy.migration',
+ globals(), locals(), ['versioning_api'], -1)
+ self.migration_api = temp.versioning_api
+
+ def assertColumnExists(self, engine, table, column):
+ t = oslodbutils.get_table(engine, table)
+ self.assertIn(column, t.c)
+
+ def assertColumnNotExists(self, engine, table, column):
+ t = oslodbutils.get_table(engine, table)
+ self.assertNotIn(column, t.c)
+
+ def assertTableNotExists(self, engine, table):
+ self.assertRaises(sqlalchemy.exc.NoSuchTableError,
+ oslodbutils.get_table, engine, table)
+
+ def assertIndexExists(self, engine, table, index):
+ t = oslodbutils.get_table(engine, table)
+ index_names = [idx.name for idx in t.indexes]
+ self.assertIn(index, index_names)
+
+ def assertIndexMembers(self, engine, table, index, members):
+ self.assertIndexExists(engine, table, index)
+
+ t = oslodbutils.get_table(engine, table)
+ index_columns = None
+ for idx in t.indexes:
+ if idx.name == index:
+ index_columns = idx.columns.keys()
+ break
+
+ self.assertEqual(sorted(members), sorted(index_columns))
+
+ def _check_227(self, engine, data):
+ table = oslodbutils.get_table(engine, 'project_user_quotas')
+
+ # Insert fake_quotas with the longest resource name.
+ fake_quotas = {'id': 5,
+ 'project_id': 'fake_project',
+ 'user_id': 'fake_user',
+ 'resource': 'injected_file_content_bytes',
+ 'hard_limit': 10}
+ table.insert().execute(fake_quotas)
+
+ # Check we can get the longest resource name.
+ quota = table.select(table.c.id == 5).execute().first()
+ self.assertEqual(quota['resource'], 'injected_file_content_bytes')
+
+ def _check_228(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes', 'metrics')
+
+ compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
+ self.assertIsInstance(compute_nodes.c.metrics.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_228(self, engine):
+ self.assertColumnNotExists(engine, 'compute_nodes', 'metrics')
+
+ def _check_229(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
+
+ compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
+ self.assertIsInstance(compute_nodes.c.extra_resources.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_229(self, engine):
+ self.assertColumnNotExists(engine, 'compute_nodes', 'extra_resources')
+
+ def _check_230(self, engine, data):
+ for table_name in ['instance_actions_events',
+ 'shadow_instance_actions_events']:
+ self.assertColumnExists(engine, table_name, 'host')
+ self.assertColumnExists(engine, table_name, 'details')
+
+ action_events = oslodbutils.get_table(engine,
+ 'instance_actions_events')
+ self.assertIsInstance(action_events.c.host.type,
+ sqlalchemy.types.String)
+ self.assertIsInstance(action_events.c.details.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_230(self, engine):
+ for table_name in ['instance_actions_events',
+ 'shadow_instance_actions_events']:
+ self.assertColumnNotExists(engine, table_name, 'host')
+ self.assertColumnNotExists(engine, table_name, 'details')
+
+ def _check_231(self, engine, data):
+ self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
+
+ instances = oslodbutils.get_table(engine, 'instances')
+ self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
+ sqlalchemy.types.String)
+ self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
+
+ def _post_downgrade_231(self, engine):
+ self.assertColumnNotExists(engine, 'instances', 'ephemeral_key_uuid')
+ self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
+
+ def _check_232(self, engine, data):
+ table_names = ['compute_node_stats', 'compute_nodes',
+ 'instance_actions', 'instance_actions_events',
+ 'instance_faults', 'migrations']
+ for table_name in table_names:
+ self.assertTableNotExists(engine, 'dump_' + table_name)
+
+ def _check_233(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes', 'stats')
+
+ compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
+ self.assertIsInstance(compute_nodes.c.stats.type,
+ sqlalchemy.types.Text)
+
+ self.assertRaises(sqlalchemy.exc.NoSuchTableError,
+ oslodbutils.get_table, engine, 'compute_node_stats')
+
+ def _post_downgrade_233(self, engine):
+ self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
+
+ # confirm compute_node_stats exists
+ oslodbutils.get_table(engine, 'compute_node_stats')
+
+ def _check_234(self, engine, data):
+ self.assertIndexMembers(engine, 'reservations',
+ 'reservations_deleted_expire_idx',
+ ['deleted', 'expire'])
+
+ def _check_244(self, engine, data):
+ volume_usage_cache = oslodbutils.get_table(
+ engine, 'volume_usage_cache')
+ self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
+
+ def _post_downgrade_244(self, engine):
+ volume_usage_cache = oslodbutils.get_table(
+ engine, 'volume_usage_cache')
+ self.assertEqual(36, volume_usage_cache.c.user_id.type.length)
+
+ def _pre_upgrade_245(self, engine):
+ # create a fake network
+ networks = oslodbutils.get_table(engine, 'networks')
+ fake_network = {'id': 1}
+ networks.insert().execute(fake_network)
+
+ def _check_245(self, engine, data):
+ networks = oslodbutils.get_table(engine, 'networks')
+ network = networks.select(networks.c.id == 1).execute().first()
+ # mtu should default to None
+ self.assertIsNone(network.mtu)
+ # dhcp_server should default to None
+ self.assertIsNone(network.dhcp_server)
+ # enable dhcp should default to true
+ self.assertTrue(network.enable_dhcp)
+ # share address should default to false
+ self.assertFalse(network.share_address)
+
+ def _post_downgrade_245(self, engine):
+ self.assertColumnNotExists(engine, 'networks', 'mtu')
+ self.assertColumnNotExists(engine, 'networks', 'dhcp_server')
+ self.assertColumnNotExists(engine, 'networks', 'enable_dhcp')
+ self.assertColumnNotExists(engine, 'networks', 'share_address')
+
+ def _check_246(self, engine, data):
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
+ if fk.parent.name == 'compute_node_id']))
+
+ def _post_downgrade_246(self, engine):
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys
+ if fk.parent.name == 'compute_node_id']))
+
+ def _check_247(self, engine, data):
+ quota_usages = oslodbutils.get_table(engine, 'quota_usages')
+ self.assertFalse(quota_usages.c.resource.nullable)
+
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertTrue(pci_devices.c.deleted.nullable)
+ self.assertFalse(pci_devices.c.product_id.nullable)
+ self.assertFalse(pci_devices.c.vendor_id.nullable)
+ self.assertFalse(pci_devices.c.dev_type.nullable)
+
+ def _post_downgrade_247(self, engine):
+ quota_usages = oslodbutils.get_table(engine, 'quota_usages')
+ self.assertTrue(quota_usages.c.resource.nullable)
+
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertFalse(pci_devices.c.deleted.nullable)
+ self.assertTrue(pci_devices.c.product_id.nullable)
+ self.assertTrue(pci_devices.c.vendor_id.nullable)
+ self.assertTrue(pci_devices.c.dev_type.nullable)
+
+ def _check_248(self, engine, data):
+ self.assertIndexMembers(engine, 'reservations',
+ 'reservations_deleted_expire_idx',
+ ['deleted', 'expire'])
+
+ def _post_downgrade_248(self, engine):
+ reservations = oslodbutils.get_table(engine, 'reservations')
+ index_names = [idx.name for idx in reservations.indexes]
+ self.assertNotIn('reservations_deleted_expire_idx', index_names)
+
+ def _check_249(self, engine, data):
+ # Assert that only one index exists that covers columns
+ # instance_uuid and device_name
+ bdm = oslodbutils.get_table(engine, 'block_device_mapping')
+ self.assertEqual(1, len([i for i in bdm.indexes
+ if [c.name for c in i.columns] ==
+ ['instance_uuid', 'device_name']]))
+
+ def _post_downgrade_249(self, engine):
+ # The duplicate index is not created on downgrade, so this
+ # asserts that only one index exists that covers columns
+ # instance_uuid and device_name
+ bdm = oslodbutils.get_table(engine, 'block_device_mapping')
+ self.assertEqual(1, len([i for i in bdm.indexes
+ if [c.name for c in i.columns] ==
+ ['instance_uuid', 'device_name']]))
+
+ def _check_250(self, engine, data):
+ self.assertTableNotExists(engine, 'instance_group_metadata')
+ self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
+
+ def _post_downgrade_250(self, engine):
+ oslodbutils.get_table(engine, 'instance_group_metadata')
+ oslodbutils.get_table(engine, 'shadow_instance_group_metadata')
+
+ def _check_251(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
+ self.assertColumnExists(
+ engine, 'shadow_compute_nodes', 'numa_topology')
+
+ compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
+ shadow_compute_nodes = oslodbutils.get_table(
+ engine, 'shadow_compute_nodes')
+ self.assertIsInstance(compute_nodes.c.numa_topology.type,
+ sqlalchemy.types.Text)
+ self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_251(self, engine):
+ self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
+ self.assertColumnNotExists(
+ engine, 'shadow_compute_nodes', 'numa_topology')
+
+ def _check_252(self, engine, data):
+ oslodbutils.get_table(engine, 'instance_extra')
+ oslodbutils.get_table(engine, 'shadow_instance_extra')
+ self.assertIndexMembers(engine, 'instance_extra',
+ 'instance_extra_idx',
+ ['instance_uuid'])
+
+ def _post_downgrade_252(self, engine):
+ self.assertTableNotExists(engine, 'instance_extra')
+ self.assertTableNotExists(engine, 'shadow_instance_extra')
+
+ def _check_253(self, engine, data):
+ self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
+ self.assertColumnExists(
+ engine, 'shadow_instance_extra', 'pci_requests')
+
+ instance_extra = oslodbutils.get_table(engine, 'instance_extra')
+ shadow_instance_extra = oslodbutils.get_table(
+ engine, 'shadow_instance_extra')
+ self.assertIsInstance(instance_extra.c.pci_requests.type,
+ sqlalchemy.types.Text)
+ self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_253(self, engine):
+ self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests')
+ self.assertColumnNotExists(
+ engine, 'shadow_instance_extra', 'pci_requests')
+
+ def _check_254(self, engine, data):
+ self.assertColumnExists(engine, 'pci_devices', 'request_id')
+ self.assertColumnExists(
+ engine, 'shadow_pci_devices', 'request_id')
+
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ shadow_pci_devices = oslodbutils.get_table(
+ engine, 'shadow_pci_devices')
+ self.assertIsInstance(pci_devices.c.request_id.type,
+ sqlalchemy.types.String)
+ self.assertIsInstance(shadow_pci_devices.c.request_id.type,
+ sqlalchemy.types.String)
+
+ def _post_downgrade_254(self, engine):
+ self.assertColumnNotExists(engine, 'pci_devices', 'request_id')
+ self.assertColumnNotExists(
+ engine, 'shadow_pci_devices', 'request_id')
+
+ def _check_265(self, engine, data):
+ # Assert that only one index exists that covers columns
+ # host and deleted
+ instances = oslodbutils.get_table(engine, 'instances')
+ self.assertEqual(1, len([i for i in instances.indexes
+ if [c.name for c in i.columns][:2] ==
+ ['host', 'deleted']]))
+ # and only one index covers host column
+ iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
+ self.assertEqual(1, len([i for i in iscsi_targets.indexes
+ if [c.name for c in i.columns][:1] ==
+ ['host']]))
+
+ def _post_downgrade_265(self, engine):
+ # The duplicated index is not created on downgrade, so this
+ # asserts that only one index exists that covers columns
+ # host and deleted
+ instances = oslodbutils.get_table(engine, 'instances')
+ self.assertEqual(1, len([i for i in instances.indexes
+ if [c.name for c in i.columns][:2] ==
+ ['host', 'deleted']]))
+ # and only one index covers host column
+ iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
+ self.assertEqual(1, len([i for i in iscsi_targets.indexes
+ if [c.name for c in i.columns][:1] ==
+ ['host']]))
+
+
+class ProjectTestCase(test.NoDBTestCase):
+
+ def test_all_migrations_have_downgrade(self):
+ topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
+ py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
+ "migrate_repo", "versions", "*.py")
+
+ missing_downgrade = []
+ for path in glob.iglob(py_glob):
+ has_upgrade = False
+ has_downgrade = False
+ with open(path, "r") as f:
+ for line in f:
+ if 'def upgrade(' in line:
+ has_upgrade = True
+ if 'def downgrade(' in line:
+ has_downgrade = True
+
+ if has_upgrade and not has_downgrade:
+ fname = os.path.basename(path)
+ missing_downgrade.append(fname)
+
+ helpful_msg = (_("The following migrations are missing a downgrade:"
+ "\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
+ self.assertFalse(missing_downgrade, helpful_msg)
diff --git a/nova/tests/unit/db/test_sqlite.py b/nova/tests/unit/db/test_sqlite.py
new file mode 100644
index 0000000000..e6a0951017
--- /dev/null
+++ b/nova/tests/unit/db/test_sqlite.py
@@ -0,0 +1,53 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test cases for sqlite-specific logic"""
+
+from sqlalchemy import create_engine
+from sqlalchemy import Column, BigInteger, String
+import sqlalchemy.engine.reflection
+from sqlalchemy.ext.declarative import declarative_base
+
+from nova import test
+
+
+class TestSqlite(test.NoDBTestCase):
+ """Tests for sqlite-specific logic."""
+
+ def test_big_int_mapping(self):
+ base_class = declarative_base()
+
+ class User(base_class):
+ """Dummy class with a BigInteger column for testing."""
+ __tablename__ = "users"
+ id = Column(BigInteger, primary_key=True)
+ name = Column(String)
+
+ engine = create_engine('sqlite://')
+ base_class.metadata.create_all(engine)
+
+ insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
+
+ id_type = None
+ for column in insp.get_columns('users'):
+ if column['name'] == 'id':
+ id_type = column['type'].compile()
+
+ # NOTE(russellb) We have a hook in nova.db.sqlalchemy that makes it so
+ # BigInteger() is compiled to INTEGER for sqlite instead of BIGINT.
+
+ self.assertEqual('INTEGER', id_type)
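+
+# NOTE: for reference only -- the sqlite-specific override that the NOTE above
+# refers to is usually registered through SQLAlchemy's @compiles hook, roughly
+# like this (a sketch, not necessarily the exact nova implementation):
+#
+#     from sqlalchemy.ext.compiler import compiles
+#
+#     @compiles(BigInteger, 'sqlite')
+#     def compile_big_int_sqlite(type_, compiler, **kw):
+#         return 'INTEGER'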
diff --git a/nova/tests/unit/fake_block_device.py b/nova/tests/unit/fake_block_device.py
new file mode 100644
index 0000000000..6f27eb3749
--- /dev/null
+++ b/nova/tests/unit/fake_block_device.py
@@ -0,0 +1,44 @@
+# Copyright 2013 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.utils import timeutils
+
+from nova import block_device
+
+
+class FakeDbBlockDeviceDict(block_device.BlockDeviceDict):
+ """Defaults db fields - useful for mocking database calls."""
+
+ def __init__(self, bdm_dict=None, anon=False, **kwargs):
+ bdm_dict = bdm_dict or {}
+ db_id = bdm_dict.pop('id', 1)
+ instance_uuid = bdm_dict.pop('instance_uuid', str(uuid.uuid4()))
+
+ super(FakeDbBlockDeviceDict, self).__init__(bdm_dict=bdm_dict,
+ **kwargs)
+ fake_db_fields = {'instance_uuid': instance_uuid,
+ 'deleted_at': None,
+ 'deleted': 0}
+ if not anon:
+ fake_db_fields['id'] = db_id
+ fake_db_fields['created_at'] = timeutils.utcnow()
+ fake_db_fields['updated_at'] = timeutils.utcnow()
+ self.update(fake_db_fields)
+
+
+def AnonFakeDbBlockDeviceDict(bdm_dict, **kwargs):
+ return FakeDbBlockDeviceDict(bdm_dict=bdm_dict, anon=True, **kwargs)
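+
+# Example usage (illustrative only): tests typically build one of these and
+# return it from a stubbed DB call such as
+# db.block_device_mapping_get_all_by_instance(), e.g.:
+#
+#     bdm = FakeDbBlockDeviceDict({'device_name': '/dev/vda',
+#                                  'source_type': 'volume',
+#                                  'destination_type': 'volume',
+#                                  'volume_id': 'fake-volume-id'})
+#     # bdm now also carries 'id', 'instance_uuid', 'created_at', etc.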
diff --git a/nova/tests/unit/fake_crypto.py b/nova/tests/unit/fake_crypto.py
new file mode 100644
index 0000000000..cac79a36bc
--- /dev/null
+++ b/nova/tests/unit/fake_crypto.py
@@ -0,0 +1,109 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def ensure_ca_filesystem():
+ pass
+
+
+def fetch_ca(project_id=None):
+ rootca = """-----BEGIN CERTIFICATE-----
+MIICyzCCAjSgAwIBAgIJAIJ/UoFWKoOUMA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV
+BAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK
+Q2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTIxMDAyMTg1NzQ1WhcNMTMxMDAy
+MTg1NzQ1WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu
+IFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG
+SIb3DQEBAQUAA4GNADCBiQKBgQCg0Bn8WSqbJF3QNTZUxo1TzmFBxuqvhjZLKbnQ
+IiShdVIWUK7RC8frq8FJI7dgJNmvkIBn9njABWDoZmurQRCzD65yCSbUc4R2ea5H
+IK4wQIui0CJykvMBNjAe3bzztVVs8/ccDTsjtqq3F/KeQkKzQVfSWBrJSmYtG5tO
+G+dOSwIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFCljRfaNOsA/
+9mHuq0io7Lt83FtaMH4GA1UdIwR3MHWAFCljRfaNOsA/9mHuq0io7Lt83FtaoVKk
+UDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx
+EzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkAgn9SgVYqg5QwDQYJ
+KoZIhvcNAQEEBQADgYEAEbpJOOlpKCh5omwfAwAfFg1ml4h/FJiCH3PETmOCc+3l
+CtWTBd4MG8AoH7A3PU2JKAGVQ5XWo6+ihpW1RgfQpCnloI6vIeGcws+rSLnlzULt
+IvfCJpRg7iQdR3jZGt3295behtP1GsCqipJEulOkOaEIs8iLlXgSOG94Mkwlb4Q=
+-----END CERTIFICATE-----
+"""
+ return rootca
+
+
+def generate_x509_cert(user_id, project_id, bits=1024):
+ pk = """-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQC4h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnH
+Jjbd0j7HNlSADWeAMuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSG
+d1n4Yrar1eC8tK3Rld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQAB
+AoGBAIjfxx4YU/vO1lwUC4OwyS92q3OYcPk6XdakJryZHDTb4NcLmNzjt6bqIK7b
+2enyB2fMWdNRWvGiueZ2HmiRLDyOGsAVdEsHvL4qbr9EZGTqC8Qxx+zTevWWf6pB
+F1zxzbXNQDFZDf9kVsSLCkbMHITnW1k4MrM++9gfCO3WrfehAkEA4nd8TyCCZazq
+KMOQwFLTNaiVLeTXCtvGopl4ZNiKYZ1qI3KDXb2wbAyArFuERlotxFlylXpwtlMo
+SlI/C/sYqwJBANCX1sdfRJq8DpdP44ThWqOkWFLB9rBiwyyBt8746fX8amwr8eyz
+H44/z5GT/Vyp8qFsjkuDzeP93eeDnr2qE0UCP1zipRnPO6x4P5J4o+Y+EmLvwkAQ
+nCLYAaCvUbILHrbq2Z2wWjEYnEO03RHUd2xjkGH4TgcBMTmW4e+ZzEIduwJACnIw
+LVfWBbG5QVac3EC021EVoz9XbUnk4Eu2usS4Yrs7USN6QBJQWD1V1cKFg6h3ICJh
+leKJ4wsJm9h5kKH9yQJBAN8CaX223MlTSuBOVuIOwNA+09iLfx4UCLiH1fGMKDpe
+xVcmkM3qCnTqNxrAPSFdT9IyB3IXiaLWbvzl7MfiOwQ=
+-----END RSA PRIVATE KEY-----
+"""
+ csr = """Certificate:
+ Data:
+ Version: 1 (0x0)
+ Serial Number: 23 (0x17)
+ Signature Algorithm: md5WithRSAEncryption
+ Issuer: O=NOVA ROOT, L=Mountain View, ST=California, C=US
+ Validity
+ Not Before: Oct 2 19:31:45 2012 GMT
+ Not After : Oct 2 19:31:45 2013 GMT
+        Subject: C=US, ST=California, O=OpenStack, OU=NovaDev, """ \
+        """CN=openstack-fake-2012-10-02T19:31:45Z
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public Key: (1024 bit)
+ Modulus (1024 bit):
+ 00:b8:87:67:7a:de:28:ed:f6:5d:1f:20:14:58:df:
+ b0:f7:62:3d:85:61:a8:c2:31:49:5f:b5:2a:07:34:
+ 0e:25:13:0d:2e:4d:79:c7:26:36:dd:d2:3e:c7:36:
+ 54:80:0d:67:80:32:e6:a8:48:33:69:ec:22:2c:5c:
+ cb:7a:88:0f:c0:48:de:67:14:54:d9:94:b4:6a:23:
+ 36:28:23:44:47:8a:24:89:8e:f4:86:77:59:f8:62:
+ b6:ab:d5:e0:bc:b4:ad:d1:95:dd:59:a3:aa:e3:ea:
+ d3:ae:23:17:c5:54:96:a3:25:56:72:90:20:07:8c:
+ 63:4d:be:e9:60:7e:10:57:17
+ Exponent: 65537 (0x10001)
+ Signature Algorithm: md5WithRSAEncryption
+ 32:82:ff:8b:92:0e:8d:9c:6b:ce:7e:fe:34:16:2a:4c:47:4f:
+ c7:28:a2:33:1e:48:56:2e:4b:e8:e8:e3:48:b1:3d:a3:43:21:
+ ef:83:e7:df:e2:10:91:7e:9a:c0:4d:1e:96:68:2b:b9:f7:84:
+ 7f:ec:84:8a:bf:bc:5e:50:05:d9:ce:4a:1a:bf:d2:bf:0c:d1:
+ 7e:ec:64:c3:a5:37:78:a3:a6:2b:a1:b7:1c:cc:c8:b9:78:61:
+ 98:50:3c:e6:28:34:f1:0e:62:bb:b5:d7:a1:dd:1f:38:c6:0d:
+ 58:9f:81:67:ff:9c:32:fc:52:7e:6d:8c:91:43:49:fe:e3:48:
+ bb:40
+-----BEGIN CERTIFICATE-----
+MIICMzCCAZwCARcwDQYJKoZIhvcNAQEEBQAwTjESMBAGA1UEChMJTk9WQSBST09U
+MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQIEwpDYWxpZm9ybmlhMQsw
+CQYDVQQGEwJVUzAeFw0xMjEwMDIxOTMxNDVaFw0xMzEwMDIxOTMxNDVaMHYxCzAJ
+BgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRIwEAYDVQQKEwlPcGVuU3Rh
+Y2sxEDAOBgNVBAsTB05vdmFEZXYxLDAqBgNVBAMTI29wZW5zdGFjay1mYWtlLTIw
+MTItMTAtMDJUMTk6MzE6NDVaMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4
+h2d63ijt9l0fIBRY37D3Yj2FYajCMUlftSoHNA4lEw0uTXnHJjbd0j7HNlSADWeA
+MuaoSDNp7CIsXMt6iA/ASN5nFFTZlLRqIzYoI0RHiiSJjvSGd1n4Yrar1eC8tK3R
+ld1Zo6rj6tOuIxfFVJajJVZykCAHjGNNvulgfhBXFwIDAQABMA0GCSqGSIb3DQEB
+BAUAA4GBADKC/4uSDo2ca85+/jQWKkxHT8coojMeSFYuS+jo40ixPaNDIe+D59/i
+EJF+msBNHpZoK7n3hH/shIq/vF5QBdnOShq/0r8M0X7sZMOlN3ijpiuhtxzMyLl4
+YZhQPOYoNPEOYru116HdHzjGDVifgWf/nDL8Un5tjJFDSf7jSLtA
+-----END CERTIFICATE-----
+"""
+ return pk, csr
diff --git a/nova/tests/unit/fake_hosts.py b/nova/tests/unit/fake_hosts.py
new file mode 100644
index 0000000000..78fa414ac7
--- /dev/null
+++ b/nova/tests/unit/fake_hosts.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides some fake hosts to test host and service related functions
+"""
+
+from nova.tests.unit.objects import test_service
+
+
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+OS_API_HOST_LIST = {"hosts": HOST_LIST}
+
+HOST_LIST_NOVA_ZONE = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+service_base = test_service.fake_service
+
+SERVICES_LIST = [
+ dict(service_base, host='host_c1', topic='compute'),
+ dict(service_base, host='host_c2', topic='compute')]
diff --git a/nova/tests/unit/fake_instance.py b/nova/tests/unit/fake_instance.py
new file mode 100644
index 0000000000..b1a080269d
--- /dev/null
+++ b/nova/tests/unit/fake_instance.py
@@ -0,0 +1,107 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from nova import objects
+from nova.objects import fields
+
+
+def fake_db_secgroups(instance, names):
+ secgroups = []
+ for i, name in enumerate(names):
+ group_name = 'secgroup-%i' % i
+ if isinstance(name, dict) and name.get('name'):
+ group_name = name.get('name')
+ secgroups.append(
+ {'id': i,
+ 'instance_uuid': instance['uuid'],
+ 'name': group_name,
+ 'description': 'Fake secgroup',
+ 'user_id': instance['user_id'],
+ 'project_id': instance['project_id'],
+ 'deleted': False,
+ 'deleted_at': None,
+ 'created_at': None,
+ 'updated_at': None,
+ })
+ return secgroups
+
+
+def fake_db_instance(**updates):
+ db_instance = {
+ 'id': 1,
+ 'deleted': False,
+ 'uuid': str(uuid.uuid4()),
+ 'user_id': 'fake-user',
+ 'project_id': 'fake-project',
+ 'host': 'fake-host',
+ 'created_at': datetime.datetime(1955, 11, 5),
+ 'pci_devices': [],
+ 'security_groups': [],
+ 'metadata': {},
+ 'system_metadata': {},
+ 'root_gb': 0,
+ 'ephemeral_gb': 0
+ }
+
+ for name, field in objects.Instance.fields.items():
+ if name in db_instance:
+ continue
+ if field.nullable:
+ db_instance[name] = None
+ elif field.default != fields.UnspecifiedDefault:
+ db_instance[name] = field.default
+ else:
+ raise Exception('fake_db_instance needs help with %s' % name)
+
+ if updates:
+ db_instance.update(updates)
+
+ if db_instance.get('security_groups'):
+ db_instance['security_groups'] = fake_db_secgroups(
+ db_instance, db_instance['security_groups'])
+
+ return db_instance
+
+
+def fake_instance_obj(context, **updates):
+ expected_attrs = updates.pop('expected_attrs', None)
+ return objects.Instance._from_db_object(context,
+ objects.Instance(), fake_db_instance(**updates),
+ expected_attrs=expected_attrs)
+
+
+def fake_fault_obj(context, instance_uuid, code=404,
+ message='HTTPNotFound',
+ details='Stock details for test',
+ **updates):
+ fault = {
+ 'id': 1,
+ 'instance_uuid': instance_uuid,
+ 'code': code,
+ 'message': message,
+ 'details': details,
+ 'host': 'fake_host',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'updated_at': None,
+ 'deleted_at': None
+ }
+ if updates:
+ fault.update(updates)
+ return objects.InstanceFault._from_db_object(context,
+ objects.InstanceFault(),
+ fault)
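+
+# Example usage (illustrative only):
+#
+#     ctxt = nova.context.get_admin_context()
+#     inst = fake_instance_obj(ctxt, host='host1')
+#     # 'inst' is a fully populated objects.Instance that can be handed to
+#     # code under test without touching the database.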
diff --git a/nova/tests/unit/fake_ldap.py b/nova/tests/unit/fake_ldap.py
new file mode 100644
index 0000000000..dd69d42961
--- /dev/null
+++ b/nova/tests/unit/fake_ldap.py
@@ -0,0 +1,330 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Fake LDAP server for test harness.
+
+This class does very little error checking, and knows nothing about ldap
+class definitions. It implements the minimum emulation of the python ldap
+library to work with nova.
+
+"""
+
+import fnmatch
+
+from oslo.serialization import jsonutils
+
+from nova.i18n import _
+
+
+class Store(object):
+ def __init__(self):
+ if hasattr(self.__class__, '_instance'):
+ raise Exception(_('Attempted to instantiate singleton'))
+
+ @classmethod
+ def instance(cls):
+ if not hasattr(cls, '_instance'):
+ cls._instance = _StorageDict()
+ return cls._instance
+
+
+class _StorageDict(dict):
+ def keys(self, pat=None):
+ ret = super(_StorageDict, self).keys()
+ if pat is not None:
+ ret = fnmatch.filter(ret, pat)
+ return ret
+
+ def delete(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def flushdb(self):
+ self.clear()
+
+ def hgetall(self, key):
+ """Returns the hash for the given key
+
+ Creates the hash if the key doesn't exist.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = {}
+ return self[key]
+
+ def hget(self, key, field):
+ hashdict = self.hgetall(key)
+ try:
+ return hashdict[field]
+ except KeyError:
+ hashdict[field] = {}
+ return hashdict[field]
+
+ def hset(self, key, field, val):
+ hashdict = self.hgetall(key)
+ hashdict[field] = val
+
+ def hmset(self, key, value_dict):
+ hashdict = self.hgetall(key)
+ for field, val in value_dict.items():
+ hashdict[field] = val
+
+
+SCOPE_BASE = 0
+SCOPE_ONELEVEL = 1 # Not implemented
+SCOPE_SUBTREE = 2
+MOD_ADD = 0
+MOD_DELETE = 1
+MOD_REPLACE = 2
+
+
+class NO_SUCH_OBJECT(Exception): # pylint: disable=C0103
+ """Duplicate exception class from real LDAP module."""
+ pass
+
+
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
+ """Duplicate exception class from real LDAP module."""
+ pass
+
+
+class SERVER_DOWN(Exception): # pylint: disable=C0103
+ """Duplicate exception class from real LDAP module."""
+ pass
+
+
+def initialize(_uri):
+ """Opens a fake connection with an LDAP server."""
+ return FakeLDAP()
+
+
+def _match_query(query, attrs):
+ """Match an ldap query to an attribute dictionary.
+
+ The characters &, |, and ! are supported in the query. No syntax checking
+ is performed, so malformed queries will not work correctly.
+ """
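+    # Examples of filters this understands (illustrative):
+    #   '(objectclass=*)', '(&(cn=foo)(ou=bar))', '(|(cn=foo)(cn=bar))',
+    #   '(!(cn=foo))'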
+ # cut off the parentheses
+ inner = query[1:-1]
+ if inner.startswith('&'):
+ # cut off the &
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) and _match_query(r, attrs)
+ if inner.startswith('|'):
+ # cut off the |
+ l, r = _paren_groups(inner[1:])
+ return _match_query(l, attrs) or _match_query(r, attrs)
+ if inner.startswith('!'):
+ # cut off the ! and the nested parentheses
+ return not _match_query(query[2:-1], attrs)
+
+ (k, _sep, v) = inner.partition('=')
+ return _match(k, v, attrs)
+
+
+def _paren_groups(source):
+ """Split a string into parenthesized groups."""
+ count = 0
+ start = 0
+ result = []
+ for pos in xrange(len(source)):
+ if source[pos] == '(':
+ if count == 0:
+ start = pos
+ count += 1
+ if source[pos] == ')':
+ count -= 1
+ if count == 0:
+ result.append(source[start:pos + 1])
+ return result
+
+
+def _match(key, value, attrs):
+ """Match a given key and value against an attribute list."""
+ if key not in attrs:
+ return False
+ # This is a wild card search. Implemented as all or nothing for now.
+ if value == "*":
+ return True
+ if key != "objectclass":
+ return value in attrs[key]
+ # it is an objectclass check, so check subclasses
+ values = _subs(value)
+ for v in values:
+ if v in attrs[key]:
+ return True
+ return False
+
+
+def _subs(value):
+ """Returns a list of subclass strings.
+
+ The strings represent the ldap object class plus any subclasses that
+ inherit from it. Fakeldap doesn't know about the ldap object structure,
+ so subclasses need to be defined manually in the dictionary below.
+
+ """
+ subs = {'groupOfNames': ['novaProject']}
+ if value in subs:
+ return [value] + subs[value]
+ return [value]
+
+
+def _from_json(encoded):
+ """Convert attribute values from json representation.
+
+ Args:
+ encoded -- a json encoded string
+
+ Returns a list of strings
+
+ """
+ return [str(x) for x in jsonutils.loads(encoded)]
+
+
+def _to_json(unencoded):
+ """Convert attribute values into json representation.
+
+ Args:
+ unencoded -- an unencoded string or list of strings. If it
+ is a single string, it will be converted into a list.
+
+ Returns a json string
+
+ """
+ return jsonutils.dumps(list(unencoded))
+
+
+server_fail = False
+
+
+class FakeLDAP(object):
+ """Fake LDAP connection."""
+
+ def simple_bind_s(self, dn, password):
+ """This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN()
+ pass
+
+ def unbind_s(self):
+ """This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN()
+ pass
+
+ def add_s(self, dn, attr):
+ """Add an object with the specified attributes at dn."""
+ if server_fail:
+ raise SERVER_DOWN()
+
+ key = "%s%s" % (self.__prefix, dn)
+ value_dict = dict([(k, _to_json(v)) for k, v in attr])
+ Store.instance().hmset(key, value_dict)
+
+ def delete_s(self, dn):
+ """Remove the ldap object at specified dn."""
+ if server_fail:
+ raise SERVER_DOWN()
+
+ Store.instance().delete("%s%s" % (self.__prefix, dn))
+
+ def modify_s(self, dn, attrs):
+ """Modify the object at dn using the attribute list.
+
+ :param dn: a dn
+ :param attrs: a list of tuples in the following form::
+
+            ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
+
+ """
+ if server_fail:
+ raise SERVER_DOWN()
+
+ store = Store.instance()
+ key = "%s%s" % (self.__prefix, dn)
+
+ for cmd, k, v in attrs:
+ values = _from_json(store.hget(key, k))
+ if cmd == MOD_ADD:
+ values.append(v)
+ elif cmd == MOD_REPLACE:
+ values = [v]
+ else:
+ values.remove(v)
+            store.hset(key, k, _to_json(values))
+
+ def modrdn_s(self, dn, newrdn):
+ oldobj = self.search_s(dn, SCOPE_BASE)
+ if not oldobj:
+ raise NO_SUCH_OBJECT()
+ newdn = "%s,%s" % (newrdn, dn.partition(',')[2])
+ newattrs = oldobj[0][1]
+
+ modlist = []
+ for attrtype in newattrs.keys():
+ modlist.append((attrtype, newattrs[attrtype]))
+
+ self.add_s(newdn, modlist)
+ self.delete_s(dn)
+
+ def search_s(self, dn, scope, query=None, fields=None):
+ """Search for all matching objects under dn using the query.
+
+ Args:
+ dn -- dn to search under
+ scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
+ query -- query to filter objects by
+ fields -- fields to return. Returns all fields if not specified
+
+ """
+ if server_fail:
+ raise SERVER_DOWN()
+
+ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
+ raise NotImplementedError(str(scope))
+ store = Store.instance()
+ if scope == SCOPE_BASE:
+ pattern = "%s%s" % (self.__prefix, dn)
+ keys = store.keys(pattern)
+ else:
+ keys = store.keys("%s*%s" % (self.__prefix, dn))
+
+ if not keys:
+ raise NO_SUCH_OBJECT()
+
+ objects = []
+ for key in keys:
+ # get the attributes from the store
+ attrs = store.hgetall(key)
+ # turn the values from the store into lists
+ # pylint: disable=E1103
+ attrs = dict([(k, _from_json(v))
+ for k, v in attrs.iteritems()])
+ # filter the objects by query
+ if not query or _match_query(query, attrs):
+ # filter the attributes by fields
+ attrs = dict([(k, v) for k, v in attrs.iteritems()
+ if not fields or k in fields])
+ objects.append((key[len(self.__prefix):], attrs))
+ return objects
+
+ @property
+ def __prefix(self): # pylint: disable=R0201
+ """Get the prefix to use for all keys."""
+ return 'ldap:'
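+
+# Example usage (illustrative only): tests point code that expects the real
+# python-ldap module at this one and drive it the same way, e.g.:
+#
+#     conn = initialize('fake://localhost')
+#     conn.add_s('cn=foo,ou=bar', [('objectclass', ['top']),
+#                                  ('cn', ['foo'])])
+#     conn.search_s('ou=bar', SCOPE_SUBTREE, '(objectclass=*)')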
diff --git a/nova/tests/unit/fake_loadables/__init__.py b/nova/tests/unit/fake_loadables/__init__.py
new file mode 100644
index 0000000000..a74c55d21a
--- /dev/null
+++ b/nova/tests/unit/fake_loadables/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Loadable class handling.
+"""
+
+from nova import loadables
+
+
+class FakeLoadable(object):
+ pass
+
+
+class FakeLoader(loadables.BaseLoader):
+ def __init__(self):
+ super(FakeLoader, self).__init__(FakeLoadable)
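+
+# Example usage (illustrative, assuming the nova.loadables.BaseLoader API):
+#
+#     loader = FakeLoader()
+#     classes = loader.get_all_classes()
+#     # 'classes' should contain the FakeLoadable subclasses defined in the
+#     # fake_loadable1 / fake_loadable2 modules of this package.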
diff --git a/nova/tests/unit/fake_loadables/fake_loadable1.py b/nova/tests/unit/fake_loadables/fake_loadable1.py
new file mode 100644
index 0000000000..a30b66bdf7
--- /dev/null
+++ b/nova/tests/unit/fake_loadables/fake_loadable1.py
@@ -0,0 +1,44 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #1
+"""
+
+from nova.tests.unit import fake_loadables
+
+
+class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass4(object):
+ """Not a correct subclass."""
+
+
+def return_valid_classes():
+ return [FakeLoadableSubClass1, FakeLoadableSubClass2]
+
+
+def return_invalid_classes():
+ return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
+ FakeLoadableSubClass4]
diff --git a/nova/tests/unit/fake_loadables/fake_loadable2.py b/nova/tests/unit/fake_loadables/fake_loadable2.py
new file mode 100644
index 0000000000..a70ab5f952
--- /dev/null
+++ b/nova/tests/unit/fake_loadables/fake_loadable2.py
@@ -0,0 +1,39 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #2
+"""
+
+from nova.tests.unit import fake_loadables
+
+
+class FakeLoadableSubClass5(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass6(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass7(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass8(BaseException):
+ """Not a correct subclass."""
+
+
+def return_valid_class():
+ return [FakeLoadableSubClass6]
diff --git a/nova/tests/unit/fake_network.py b/nova/tests/unit/fake_network.py
new file mode 100644
index 0000000000..09f54b13d3
--- /dev/null
+++ b/nova/tests/unit/fake_network.py
@@ -0,0 +1,457 @@
+# Copyright 2011 Rackspace
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+import nova.context
+from nova import db
+from nova import exception
+from nova.network import api as network_api
+from nova.network import manager as network_manager
+from nova.network import model as network_model
+from nova.network import rpcapi as network_rpcapi
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import virtual_interface as vif_obj
+from nova.pci import device as pci_device
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_pci_device
+
+
+HOST = "testhost"
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+class FakeModel(dict):
+ """Represent a model from the db."""
+ def __init__(self, *args, **kwargs):
+ self.update(kwargs)
+
+
+class FakeNetworkManager(network_manager.NetworkManager):
+ """This NetworkManager doesn't call the base class so we can bypass all
+ inherited service cruft and just perform unit tests.
+ """
+
+    class FakeDB(object):
+ vifs = [{'id': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'network_id': 1,
+ 'uuid': 'fake-uuid',
+ 'address': 'DC:AD:BE:FF:EF:01'},
+ {'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000020',
+ 'network_id': 21,
+ 'uuid': 'fake-uuid2',
+ 'address': 'DC:AD:BE:FF:EF:02'},
+ {'id': 2,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000030',
+ 'network_id': 31,
+ 'uuid': 'fake-uuid3',
+ 'address': 'DC:AD:BE:FF:EF:03'}]
+
+ floating_ips = [dict(address='172.16.1.1',
+ fixed_ip_id=100),
+ dict(address='172.16.1.2',
+ fixed_ip_id=200),
+ dict(address='173.16.1.2',
+ fixed_ip_id=210)]
+
+ fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
+ id=100,
+ address='172.16.0.1',
+ virtual_interface_id=0),
+ dict(test_fixed_ip.fake_fixed_ip,
+ id=200,
+ address='172.16.0.2',
+ virtual_interface_id=1),
+ dict(test_fixed_ip.fake_fixed_ip,
+ id=210,
+ address='173.16.0.2',
+ virtual_interface_id=2)]
+
+ def fixed_ip_get_by_instance(self, context, instance_uuid):
+ return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
+ dict(address='10.0.0.2')]
+
+ def network_get_by_cidr(self, context, cidr):
+ raise exception.NetworkNotFoundForCidr(cidr=cidr)
+
+ def network_create_safe(self, context, net):
+ fakenet = dict(net)
+ fakenet['id'] = 999
+ return fakenet
+
+ def network_get(self, context, network_id, project_only="allow_none"):
+ return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
+
+ def network_get_by_uuid(self, context, network_uuid):
+ raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
+
+ def network_get_all(self, context):
+ raise exception.NoNetworksFound()
+
+ def network_get_all_by_uuids(self, context, project_only="allow_none"):
+ raise exception.NoNetworksFound()
+
+ def network_disassociate(self, context, network_id):
+ return True
+
+ def virtual_interface_get_all(self, context):
+ return self.vifs
+
+ def fixed_ips_by_virtual_interface(self, context, vif_id):
+ return [ip for ip in self.fixed_ips
+ if ip['virtual_interface_id'] == vif_id]
+
+ def fixed_ip_disassociate(self, context, address):
+ return True
+
+ def __init__(self, stubs=None):
+ self.db = self.FakeDB()
+ if stubs:
+ stubs.Set(vif_obj, 'db', self.db)
+ self.deallocate_called = None
+ self.deallocate_fixed_ip_calls = []
+ self.network_rpcapi = network_rpcapi.NetworkAPI()
+
+ # TODO(matelakat) method signature should align with the faked one's
+ def deallocate_fixed_ip(self, context, address=None, host=None,
+ instance=None):
+ self.deallocate_fixed_ip_calls.append((context, address, host))
+ # TODO(matelakat) use the deallocate_fixed_ip_calls instead
+ self.deallocate_called = address
+
+ def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
+ pass
+
+    def get_instance_nw_info(self, context, instance_id, rxtx_factor,
+ host, instance_uuid=None, **kwargs):
+ pass
+
+
+def fake_network(network_id, ipv6=None):
+ if ipv6 is None:
+ ipv6 = CONF.use_ipv6
+ fake_network = {'id': network_id,
+ 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
+ 'label': 'test%d' % network_id,
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.%d.0/24' % network_id,
+ 'cidr_v6': None,
+ 'netmask': '255.255.255.0',
+ 'netmask_v6': None,
+ 'bridge': 'fake_br%d' % network_id,
+ 'bridge_interface': 'fake_eth%d' % network_id,
+ 'gateway': '192.168.%d.1' % network_id,
+ 'gateway_v6': None,
+ 'broadcast': '192.168.%d.255' % network_id,
+ 'dns1': '192.168.%d.3' % network_id,
+ 'dns2': '192.168.%d.4' % network_id,
+ 'dns3': '192.168.%d.3' % network_id,
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.%d.2' % network_id,
+ 'vpn_public_port': None,
+ 'vpn_private_address': None,
+ 'dhcp_start': None,
+ 'rxtx_base': network_id * 10,
+ 'priority': None,
+ 'deleted': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'mtu': None,
+ 'dhcp_server': '192.168.%d.1' % network_id,
+ 'enable_dhcp': True,
+ 'share_address': False}
+ if ipv6:
+ fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
+ fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
+ fake_network['netmask_v6'] = '64'
+ if CONF.flat_injected:
+ fake_network['injected'] = True
+
+ return fake_network
+
+
+def fake_vif(x):
+    return {'id': x,
+            'created_at': None,
+            'updated_at': None,
+            'deleted_at': None,
+            'deleted': 0,
+            'address': 'DE:AD:BE:EF:00:%02x' % x,
+            'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
+            'network_id': x,
+            'instance_uuid': 'fake-uuid'}
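+# For example (values derived from the templates above), fake_network(1)
+# describes network 'test1' with cidr '192.168.1.0/24' and bridge 'fake_br1',
+# and fake_vif(1) is a matching interface with address 'DE:AD:BE:EF:00:01'.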
+
+
+def floating_ip_ids():
+ for i in xrange(1, 100):
+ yield i
+
+
+def fixed_ip_ids():
+ for i in xrange(1, 100):
+ yield i
+
+
+floating_ip_id = floating_ip_ids()
+fixed_ip_id = fixed_ip_ids()
+
+
+def next_fixed_ip(network_id, num_floating_ips=0):
+ next_id = fixed_ip_id.next()
+ f_ips = [FakeModel(**next_floating_ip(next_id))
+ for i in xrange(num_floating_ips)]
+ return {'id': next_id,
+ 'network_id': network_id,
+ 'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
+ 'instance_uuid': 1,
+ 'allocated': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'leased': True,
+ 'host': HOST,
+ 'deleted': 0,
+ 'network': fake_network(network_id),
+ 'virtual_interface': fake_vif(network_id),
+ # and since network_id and vif_id happen to be equivalent
+ 'virtual_interface_id': network_id,
+ 'floating_ips': f_ips}
+
+
+def next_floating_ip(fixed_ip_id):
+ next_id = floating_ip_id.next()
+ return {'id': next_id,
+ 'address': '10.10.10.%03d' % (next_id + 99),
+ 'fixed_ip_id': fixed_ip_id,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+
+def ipv4_like(ip, match_string):
+ ip = ip.split('.')
+ match_octets = match_string.split('.')
+
+ for i, octet in enumerate(match_octets):
+ if octet == '*':
+ continue
+ if octet != ip[i]:
+ return False
+ return True
+
+
+def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
+ floating_ips_per_fixed_ip=0):
+ # stubs is the self.stubs from the test
+ # ips_per_vif is the number of ips each vif will have
+    # floating_ips_per_fixed_ip is the number of floating ips per fixed ip
+ network = network_manager.FlatManager(host=HOST)
+ network.db = db
+
+ # reset the fixed and floating ip generators
+ global floating_ip_id, fixed_ip_id, fixed_ips
+ floating_ip_id = floating_ip_ids()
+ fixed_ip_id = fixed_ip_ids()
+ fixed_ips = []
+
+ def fixed_ips_fake(*args, **kwargs):
+ global fixed_ips
+ ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
+ for i in xrange(1, num_networks + 1)
+ for j in xrange(ips_per_vif)]
+ fixed_ips = ips
+ return ips
+
+ def update_cache_fake(*args, **kwargs):
+ pass
+
+ stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
+ stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
+
+ class FakeContext(nova.context.RequestContext):
+ def is_admin(self):
+ return True
+
+ nw_model = network.get_instance_nw_info(
+ FakeContext('fakeuser', 'fake_project'),
+ 0, 3, None)
+ return nw_model
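+# Typical usage (illustrative): a test calls
+# fake_get_instance_nw_info(self.stubs, num_networks=2, ips_per_vif=1) and
+# gets back a network_model.NetworkInfo built from the fake fixed IPs above,
+# with db.fixed_ip_get_by_instance and db.instance_info_cache_update stubbed
+# for the duration of the test.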
+
+
+def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
+ num_networks=1,
+ ips_per_vif=1,
+ floating_ips_per_fixed_ip=0):
+
+ def get_instance_nw_info(self, context, instance, conductor_api=None):
+ return fake_get_instance_nw_info(stubs, num_networks=num_networks,
+ ips_per_vif=ips_per_vif,
+ floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
+
+ if func is None:
+ func = get_instance_nw_info
+ stubs.Set(network_api.API, 'get_instance_nw_info', func)
+
+
+def stub_out_network_cleanup(stubs):
+ stubs.Set(network_api.API, 'deallocate_for_instance',
+ lambda *args, **kwargs: None)
+
+
+_real_functions = {}
+
+
+def set_stub_network_methods(stubs):
+ global _real_functions
+ cm = compute_manager.ComputeManager
+ if not _real_functions:
+ _real_functions = {
+ '_get_instance_nw_info': cm._get_instance_nw_info,
+ '_allocate_network': cm._allocate_network,
+ '_deallocate_network': cm._deallocate_network}
+
+ def fake_networkinfo(*args, **kwargs):
+ return network_model.NetworkInfo()
+
+ def fake_async_networkinfo(*args, **kwargs):
+ return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
+
+ stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo)
+ stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
+ stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
+
+
+def unset_stub_network_methods(stubs):
+ global _real_functions
+ if _real_functions:
+ cm = compute_manager.ComputeManager
+ for name in _real_functions:
+ stubs.Set(cm, name, _real_functions[name])
+
+
+def stub_compute_with_ips(stubs):
+ orig_get = compute_api.API.get
+ orig_get_all = compute_api.API.get_all
+ orig_create = compute_api.API.create
+
+ def fake_get(*args, **kwargs):
+ return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
+
+ def fake_get_all(*args, **kwargs):
+ return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
+
+ def fake_create(*args, **kwargs):
+ return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
+
+ def fake_pci_device_get_by_addr(context, node_id, dev_addr):
+ return test_pci_device.fake_db_dev
+
+ stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
+ stubs.Set(compute_api.API, 'get', fake_get)
+ stubs.Set(compute_api.API, 'get_all', fake_get_all)
+ stubs.Set(compute_api.API, 'create', fake_create)
+
+
+def _get_fake_cache():
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip('192.168.0.3')]}]}}]
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+ return jsonutils.dumps(info)
+
+
+def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+ context = args[0]
+ fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
+
+ def _info_cache_for(instance):
+ info_cache = dict(test_instance_info_cache.fake_info_cache,
+ network_info=_get_fake_cache(),
+ instance_uuid=instance['uuid'])
+ if isinstance(instance, obj_base.NovaObject):
+ _info_cache = objects.InstanceInfoCache(context)
+ objects.InstanceInfoCache._from_db_object(context, _info_cache,
+ info_cache)
+ info_cache = _info_cache
+ instance['info_cache'] = info_cache
+
+ if isinstance(instances, (list, obj_base.ObjectListBase)):
+ for instance in instances:
+ _info_cache_for(instance)
+ pci_device.claim(fake_device, instance)
+ pci_device.allocate(fake_device, instance)
+ else:
+ _info_cache_for(instances)
+ pci_device.claim(fake_device, instances)
+ pci_device.allocate(fake_device, instances)
+ return instances
+
+
+def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the above kludge so that the database doesn't get out
+ of sync with the actual instance.
+ """
+ instances, reservation_id = orig_func(*args, **kwargs)
+ fake_cache = _get_fake_cache()
+ for instance in instances:
+ instance['info_cache']['network_info'] = fake_cache
+ db.instance_info_cache_update(args[1], instance['uuid'],
+ {'network_info': fake_cache})
+ return (instances, reservation_id)
diff --git a/nova/tests/unit/fake_network_cache_model.py b/nova/tests/unit/fake_network_cache_model.py
new file mode 100644
index 0000000000..9757773ba9
--- /dev/null
+++ b/nova/tests/unit/fake_network_cache_model.py
@@ -0,0 +1,77 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import model
+
+
+def new_ip(ip_dict=None):
+ new_ip = dict(address='192.168.1.100')
+ ip_dict = ip_dict or {}
+ new_ip.update(ip_dict)
+ return model.IP(**new_ip)
+
+
+def new_fixed_ip(ip_dict=None):
+ new_fixed_ip = dict(address='192.168.1.100')
+ ip_dict = ip_dict or {}
+ new_fixed_ip.update(ip_dict)
+ return model.FixedIP(**new_fixed_ip)
+
+
+def new_route(route_dict=None):
+ new_route = dict(
+ cidr='0.0.0.0/24',
+ gateway=new_ip(dict(address='192.168.1.1')),
+ interface='eth0')
+
+ route_dict = route_dict or {}
+ new_route.update(route_dict)
+ return model.Route(**new_route)
+
+
+def new_subnet(subnet_dict=None):
+ new_subnet = dict(
+ cidr='10.10.0.0/24',
+ dns=[new_ip(dict(address='1.2.3.4')),
+ new_ip(dict(address='2.3.4.5'))],
+ gateway=new_ip(dict(address='10.10.0.1')),
+ ips=[new_fixed_ip(dict(address='10.10.0.2')),
+ new_fixed_ip(dict(address='10.10.0.3'))],
+ routes=[new_route()])
+ subnet_dict = subnet_dict or {}
+ new_subnet.update(subnet_dict)
+ return model.Subnet(**new_subnet)
+
+
+def new_network(network_dict=None):
+ new_net = dict(
+ id=1,
+ bridge='br0',
+ label='public',
+ subnets=[new_subnet(), new_subnet(dict(cidr='255.255.255.255'))])
+ network_dict = network_dict or {}
+ new_net.update(network_dict)
+ return model.Network(**new_net)
+
+
+def new_vif(vif_dict=None):
+ vif = dict(
+ id=1,
+ address='aa:aa:aa:aa:aa:aa',
+ type='bridge',
+ network=new_network())
+ vif_dict = vif_dict or {}
+ vif.update(vif_dict)
+ return model.VIF(**vif)
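+
+# Example usage (illustrative only):
+#
+#     vif = new_vif()
+#     # vif['network']['subnets'][0]['ips'][0]['address'] == '10.10.0.2'
+#     vif2 = new_vif(dict(address='bb:bb:bb:bb:bb:bb', id=2))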
diff --git a/nova/tests/unit/fake_notifier.py b/nova/tests/unit/fake_notifier.py
new file mode 100644
index 0000000000..110418215d
--- /dev/null
+++ b/nova/tests/unit/fake_notifier.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import functools
+
+import anyjson
+from oslo import messaging
+
+from nova import rpc
+
+NOTIFICATIONS = []
+
+
+def reset():
+ del NOTIFICATIONS[:]
+
+
+FakeMessage = collections.namedtuple('Message',
+ ['publisher_id', 'priority',
+ 'event_type', 'payload'])
+
+
+class FakeNotifier(object):
+
+ def __init__(self, transport, publisher_id, serializer=None):
+ self.transport = transport
+ self.publisher_id = publisher_id
+ self._serializer = serializer or messaging.serializer.NoOpSerializer()
+
+ for priority in ['debug', 'info', 'warn', 'error', 'critical']:
+ setattr(self, priority,
+ functools.partial(self._notify, priority.upper()))
+
+ def prepare(self, publisher_id=None):
+ if publisher_id is None:
+ publisher_id = self.publisher_id
+ return self.__class__(self.transport, publisher_id,
+ serializer=self._serializer)
+
+ def _notify(self, priority, ctxt, event_type, payload):
+ payload = self._serializer.serialize_entity(ctxt, payload)
+ # NOTE(sileht): simulate the kombu serializer
+ # this permit to raise an exception if something have not
+ # been serialized correctly
+ anyjson.serialize(payload)
+ msg = FakeMessage(self.publisher_id, priority, event_type, payload)
+ NOTIFICATIONS.append(msg)
+
+
+def stub_notifier(stubs):
+ stubs.Set(messaging, 'Notifier', FakeNotifier)
+ if rpc.NOTIFIER:
+ stubs.Set(rpc, 'NOTIFIER',
+ FakeNotifier(rpc.NOTIFIER.transport,
+ rpc.NOTIFIER.publisher_id,
+ serializer=getattr(rpc.NOTIFIER, '_serializer',
+ None)))
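+
+# Example usage (illustrative only):
+#
+#     fake_notifier.stub_notifier(self.stubs)
+#     self.addCleanup(fake_notifier.reset)
+#     # ... exercise code that emits notifications ...
+#     msg = fake_notifier.NOTIFICATIONS[-1]
+#     self.assertEqual('INFO', msg.priority)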
diff --git a/nova/tests/unit/fake_policy.py b/nova/tests/unit/fake_policy.py
new file mode 100644
index 0000000000..8344af475d
--- /dev/null
+++ b/nova/tests/unit/fake_policy.py
@@ -0,0 +1,412 @@
+# Copyright (c) 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+policy_data = """
+{
+ "admin_api": "is_admin:True",
+
+ "cells_scheduler_filter:TargetCellFilter": "is_admin:True",
+
+ "context_is_admin": "role:admin or role:administrator",
+ "compute:create": "",
+ "compute:create:attach_network": "",
+ "compute:create:attach_volume": "",
+
+ "compute:get": "",
+ "compute:get_all": "",
+ "compute:get_all_tenants": "",
+
+ "compute:update": "",
+
+ "compute:get_instance_metadata": "",
+ "compute:get_all_instance_metadata": "",
+ "compute:get_all_instance_system_metadata": "",
+ "compute:update_instance_metadata": "",
+ "compute:delete_instance_metadata": "",
+
+ "compute:get_instance_faults": "",
+ "compute:get_diagnostics": "",
+ "compute:get_instance_diagnostics": "",
+
+ "compute:get_lock": "",
+ "compute:lock": "",
+ "compute:unlock": "",
+ "compute:unlock_override": "is_admin:True",
+
+ "compute:get_vnc_console": "",
+ "compute:get_spice_console": "",
+ "compute:get_rdp_console": "",
+ "compute:get_serial_console": "",
+ "compute:get_console_output": "",
+
+ "compute:associate_floating_ip": "",
+ "compute:reset_network": "",
+ "compute:inject_network_info": "",
+ "compute:add_fixed_ip": "",
+ "compute:remove_fixed_ip": "",
+
+ "compute:attach_volume": "",
+ "compute:detach_volume": "",
+
+ "compute:attach_interface": "",
+ "compute:detach_interface": "",
+
+ "compute:set_admin_password": "",
+
+ "compute:rescue": "",
+ "compute:unrescue": "",
+
+ "compute:suspend": "",
+ "compute:resume": "",
+
+ "compute:pause": "",
+ "compute:unpause": "",
+
+ "compute:start": "",
+ "compute:stop": "",
+
+ "compute:resize": "",
+ "compute:confirm_resize": "",
+ "compute:revert_resize": "",
+
+ "compute:rebuild": "",
+
+ "compute:reboot": "",
+
+ "compute:snapshot": "",
+ "compute:backup": "",
+
+ "compute:shelve": "",
+ "compute:shelve_offload": "",
+ "compute:unshelve": "",
+
+ "compute:security_groups:add_to_instance": "",
+ "compute:security_groups:remove_from_instance": "",
+
+ "compute:delete": "",
+ "compute:soft_delete": "",
+ "compute:force_delete": "",
+ "compute:restore": "",
+ "compute:swap_volume": "",
+
+ "compute:volume_snapshot_create": "",
+ "compute:volume_snapshot_delete": "",
+
+ "compute:v3:servers:start": "",
+ "compute:v3:servers:stop": "",
+ "compute_extension:v3:os-access-ips": "",
+ "compute_extension:accounts": "",
+ "compute_extension:admin_actions:pause": "",
+ "compute_extension:admin_actions:unpause": "",
+ "compute_extension:admin_actions:suspend": "",
+ "compute_extension:admin_actions:resume": "",
+ "compute_extension:admin_actions:lock": "",
+ "compute_extension:admin_actions:unlock": "",
+ "compute_extension:admin_actions:resetNetwork": "",
+ "compute_extension:admin_actions:injectNetworkInfo": "",
+ "compute_extension:admin_actions:createBackup": "",
+ "compute_extension:admin_actions:migrateLive": "",
+ "compute_extension:admin_actions:resetState": "",
+ "compute_extension:admin_actions:migrate": "",
+ "compute_extension:v3:os-admin-actions:reset_network": "",
+ "compute_extension:v3:os-admin-actions:inject_network_info": "",
+ "compute_extension:v3:os-admin-actions:reset_state": "",
+ "compute_extension:v3:os-admin-password": "",
+ "compute_extension:aggregates": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:index": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:create": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:show": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:update": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:delete": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
+ "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
+ "compute_extension:agents": "",
+ "compute_extension:v3:os-agents": "",
+ "compute_extension:attach_interfaces": "",
+ "compute_extension:v3:os-attach-interfaces": "",
+ "compute_extension:baremetal_nodes": "",
+ "compute_extension:v3:os-baremetal-nodes": "",
+ "compute_extension:cells": "",
+ "compute_extension:cells:create": "rule:admin_api",
+ "compute_extension:cells:delete": "rule:admin_api",
+ "compute_extension:cells:update": "rule:admin_api",
+ "compute_extension:cells:sync_instances": "rule:admin_api",
+ "compute_extension:v3:os-cells": "",
+ "compute_extension:v3:os-cells:create": "rule:admin_api",
+ "compute_extension:v3:os-cells:delete": "rule:admin_api",
+ "compute_extension:v3:os-cells:update": "rule:admin_api",
+ "compute_extension:v3:os-cells:sync_instances": "rule:admin_api",
+ "compute_extension:certificates": "",
+ "compute_extension:v3:os-certificates:create": "",
+ "compute_extension:v3:os-certificates:show": "",
+ "compute_extension:cloudpipe": "",
+ "compute_extension:v3:os-cloudpipe": "",
+ "compute_extension:cloudpipe_update": "",
+ "compute_extension:config_drive": "",
+ "compute_extension:v3:os-config-drive": "",
+ "compute_extension:console_output": "",
+ "compute_extension:v3:os-console-output": "",
+ "compute_extension:consoles": "",
+ "compute_extension:v3:os-remote-consoles": "",
+ "compute_extension:createserverext": "",
+ "compute_extension:v3:os-create-backup": "",
+ "compute_extension:deferred_delete": "",
+ "compute_extension:v3:os-deferred-delete": "",
+ "compute_extension:disk_config": "",
+ "compute_extension:v3:os-disk-config": "",
+ "compute_extension:evacuate": "is_admin:True",
+ "compute_extension:v3:os-evacuate": "is_admin:True",
+ "compute_extension:extended_server_attributes": "",
+ "compute_extension:v3:os-extended-server-attributes": "",
+ "compute_extension:extended_status": "",
+ "compute_extension:v3:os-extended-status": "",
+ "compute_extension:extended_availability_zone": "",
+ "compute_extension:v3:os-extended-availability-zone": "",
+ "compute_extension:extended_ips": "",
+ "compute_extension:extended_ips_mac": "",
+ "compute_extension:extended_vif_net": "",
+ "compute_extension:extended_volumes": "",
+ "compute_extension:v3:os-extended-volumes": "",
+ "compute_extension:v3:os-extended-volumes:swap": "",
+ "compute_extension:v3:os-extended-volumes:attach": "",
+ "compute_extension:v3:os-extended-volumes:detach": "",
+ "compute_extension:v3:extensions:discoverable": "",
+ "compute_extension:fixed_ips": "",
+ "compute_extension:v3:os-fixed-ips": "",
+ "compute_extension:flavor_access": "",
+ "compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
+ "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
+ "compute_extension:v3:os-flavor-access": "",
+ "compute_extension:v3:os-flavor-access:remove_tenant_access":
+ "rule:admin_api",
+ "compute_extension:v3:os-flavor-access:add_tenant_access":
+ "rule:admin_api",
+ "compute_extension:flavor_disabled": "",
+ "compute_extension:v3:os-flavor-disabled": "",
+ "compute_extension:flavor_rxtx": "",
+ "compute_extension:v3:os-flavor-rxtx": "",
+ "compute_extension:flavor_swap": "",
+ "compute_extension:flavorextradata": "",
+ "compute_extension:flavorextraspecs:index": "",
+ "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavorextraspecs:create": "is_admin:True",
+ "compute_extension:flavorextraspecs:update": "is_admin:True",
+ "compute_extension:flavorextraspecs:delete": "is_admin:True",
+ "compute_extension:v3:flavor-extra-specs:index": "",
+ "compute_extension:v3:flavor-extra-specs:show": "",
+ "compute_extension:v3:flavor-extra-specs:create": "is_admin:True",
+ "compute_extension:v3:flavor-extra-specs:update": "is_admin:True",
+ "compute_extension:v3:flavor-extra-specs:delete": "is_admin:True",
+ "compute_extension:flavormanage": "",
+ "compute_extension:v3:flavor-manage": "",
+ "compute_extension:v3:flavors:discoverable": "",
+ "compute_extension:floating_ip_dns": "",
+ "compute_extension:v3:os-floating-ip-dns": "",
+ "compute_extension:floating_ip_pools": "",
+ "compute_extension:v3:os-floating-ip-pools": "",
+ "compute_extension:floating_ips": "",
+ "compute_extension:floating_ips_bulk": "",
+ "compute_extension:v3:os-floating-ips-bulk": "",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "is_admin:True",
+ "compute_extension:v3:os-fping": "",
+ "compute_extension:v3:os-fping:all_tenants": "is_admin:True",
+ "compute_extension:hide_server_addresses": "",
+ "compute_extension:v3:os-hide-server-addresses": "",
+ "compute_extension:hosts": "rule:admin_api",
+ "compute_extension:v3:os-hosts": "rule:admin_api",
+ "compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:v3:os-hypervisors": "rule:admin_api",
+ "compute_extension:image_size": "",
+ "compute_extension:v3:image-size": "",
+ "compute_extension:instance_actions": "",
+ "compute_extension:v3:os-instance-actions": "",
+ "compute_extension:instance_actions:events": "is_admin:True",
+ "compute_extension:v3:os-instance-actions:events": "is_admin:True",
+ "compute_extension:instance_usage_audit_log": "rule:admin_api",
+ "compute_extension:keypairs": "",
+ "compute_extension:keypairs:index": "",
+ "compute_extension:keypairs:show": "",
+ "compute_extension:keypairs:create": "",
+ "compute_extension:keypairs:delete": "",
+
+ "compute_extension:v3:os-keypairs": "",
+ "compute_extension:v3:os-keypairs:index": "",
+ "compute_extension:v3:os-keypairs:show": "",
+ "compute_extension:v3:os-keypairs:create": "",
+ "compute_extension:v3:os-keypairs:delete": "",
+ "compute_extension:v3:os-lock-server:lock": "",
+ "compute_extension:v3:os-lock-server:unlock": "",
+ "compute_extension:v3:os-migrate-server:migrate": "",
+ "compute_extension:v3:os-migrate-server:migrate_live": "",
+ "compute_extension:multinic": "",
+ "compute_extension:v3:os-multinic": "",
+ "compute_extension:networks": "",
+ "compute_extension:networks:view": "",
+ "compute_extension:v3:os-networks": "",
+ "compute_extension:v3:os-networks:view": "",
+ "compute_extension:networks_associate": "",
+ "compute_extension:v3:os-networks-associate": "",
+ "compute_extension:os-tenant-networks": "",
+ "compute_extension:v3:os-tenant-networks": "",
+ "compute_extension:v3:os-pause-server:pause": "",
+ "compute_extension:v3:os-pause-server:unpause": "",
+ "compute_extension:v3:os-pci:pci_servers": "",
+ "compute_extension:v3:os-pci:index": "",
+ "compute_extension:v3:os-pci:detail": "",
+ "compute_extension:v3:os-pci:show": "",
+ "compute_extension:quotas:show": "",
+ "compute_extension:quotas:update": "",
+ "compute_extension:quotas:delete": "",
+ "compute_extension:v3:os-quota-sets:show": "",
+ "compute_extension:v3:os-quota-sets:update": "",
+ "compute_extension:v3:os-quota-sets:delete": "",
+ "compute_extension:v3:os-quota-sets:detail": "",
+ "compute_extension:quota_classes": "",
+ "compute_extension:rescue": "",
+ "compute_extension:v3:os-rescue": "",
+ "compute_extension:security_group_default_rules": "",
+ "compute_extension:v3:os-security-group-default-rules": "",
+ "compute_extension:security_groups": "",
+ "compute_extension:v3:os-security-groups": "",
+ "compute_extension:server_diagnostics": "",
+ "compute_extension:v3:os-server-diagnostics": "",
+ "compute_extension:server_groups": "",
+ "compute_extension:server_password": "",
+ "compute_extension:v3:os-server-password": "",
+ "compute_extension:server_usage": "",
+ "compute_extension:v3:os-server-usage": "",
+ "compute_extension:v3:os-server-groups": "",
+ "compute_extension:services": "",
+ "compute_extension:v3:os-services": "",
+ "compute_extension:shelve": "",
+ "compute_extension:shelveOffload": "",
+ "compute_extension:v3:os-shelve:shelve": "",
+ "compute_extension:v3:os-shelve:shelve_offload": "",
+ "compute_extension:simple_tenant_usage:show": "",
+ "compute_extension:simple_tenant_usage:list": "",
+ "compute_extension:v3:os-simple-tenant-usage:show": "",
+ "compute_extension:v3:os-simple-tenant-usage:list": "",
+ "compute_extension:unshelve": "",
+ "compute_extension:v3:os-shelve:unshelve": "",
+ "compute_extension:v3:os-suspend-server:suspend": "",
+ "compute_extension:v3:os-suspend-server:resume": "",
+ "compute_extension:users": "",
+ "compute_extension:virtual_interfaces": "",
+ "compute_extension:virtual_storage_arrays": "",
+ "compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:update": "",
+ "compute_extension:volume_attachments:delete": "",
+ "compute_extension:v3:os-volumes": "",
+ "compute_extension:volumetypes": "",
+ "compute_extension:zones": "",
+ "compute_extension:availability_zone:list": "",
+ "compute_extension:v3:os-availability-zone:list": "",
+ "compute_extension:availability_zone:detail": "is_admin:True",
+ "compute_extension:v3:os-availability-zone:detail": "is_admin:True",
+ "compute_extension:used_limits_for_admin": "is_admin:True",
+ "compute_extension:v3:os-used-limits": "is_admin:True",
+ "compute_extension:migrations:index": "is_admin:True",
+ "compute_extension:v3:os-migrations:index": "is_admin:True",
+ "compute_extension:os-assisted-volume-snapshots:create": "",
+ "compute_extension:os-assisted-volume-snapshots:delete": "",
+ "compute_extension:console_auth_tokens": "is_admin:True",
+ "compute_extension:v3:os-console-auth-tokens": "is_admin:True",
+ "compute_extension:os-server-external-events:create": "rule:admin_api",
+ "compute_extension:v3:os-server-external-events:create": "rule:admin_api",
+
+ "volume:create": "",
+ "volume:get": "",
+ "volume:get_all": "",
+ "volume:get_volume_metadata": "",
+ "volume:delete": "",
+ "volume:update": "",
+ "volume:delete_volume_metadata": "",
+ "volume:update_volume_metadata": "",
+ "volume:attach": "",
+ "volume:detach": "",
+ "volume:reserve_volume": "",
+ "volume:unreserve_volume": "",
+ "volume:begin_detaching": "",
+ "volume:roll_detaching": "",
+ "volume:check_attach": "",
+ "volume:check_detach": "",
+ "volume:initialize_connection": "",
+ "volume:terminate_connection": "",
+ "volume:create_snapshot": "",
+ "volume:delete_snapshot": "",
+ "volume:get_snapshot": "",
+ "volume:get_all_snapshots": "",
+
+
+ "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
+ "volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
+ "volume_extension:volume_actions:upload_image": "",
+ "volume_extension:types_manage": "",
+ "volume_extension:types_extra_specs": "",
+
+
+ "network:get_all": "",
+ "network:get": "",
+ "network:create": "",
+ "network:delete": "",
+ "network:associate": "",
+ "network:disassociate": "",
+ "network:get_vifs_by_instance": "",
+ "network:get_vif_by_mac_address": "",
+ "network:allocate_for_instance": "",
+ "network:deallocate_for_instance": "",
+ "network:validate_networks": "",
+ "network:get_instance_uuids_by_ip_filter": "",
+ "network:get_instance_id_by_floating_address": "",
+ "network:setup_networks_on_host": "",
+
+ "network:get_floating_ip": "",
+ "network:get_floating_ip_pools": "",
+ "network:get_floating_ip_by_address": "",
+ "network:get_floating_ips_by_project": "",
+ "network:get_floating_ips_by_fixed_address": "",
+ "network:allocate_floating_ip": "",
+ "network:deallocate_floating_ip": "",
+ "network:associate_floating_ip": "",
+ "network:disassociate_floating_ip": "",
+ "network:release_floating_ip": "",
+ "network:migrate_instance_start": "",
+ "network:migrate_instance_finish": "",
+
+ "network:get_fixed_ip": "",
+ "network:get_fixed_ip_by_address": "",
+ "network:add_fixed_ip_to_instance": "",
+ "network:remove_fixed_ip_from_instance": "",
+ "network:add_network_to_project": "",
+ "network:get_instance_nw_info": "",
+
+ "network:get_dns_domains": "",
+ "network:add_dns_entry": "",
+ "network:modify_dns_entry": "",
+ "network:delete_dns_entry": "",
+ "network:get_dns_entries_by_address": "",
+ "network:get_dns_entries_by_name": "",
+ "network:create_private_dns_domain": "",
+ "network:create_public_dns_domain": "",
+ "network:delete_dns_domain": "",
+ "network:attach_external_network": "rule:admin_api"
+}
+"""
diff --git a/nova/tests/unit/fake_processutils.py b/nova/tests/unit/fake_processutils.py
new file mode 100644
index 0000000000..111540d1d1
--- /dev/null
+++ b/nova/tests/unit/fake_processutils.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This modules stubs out functions in oslo.concurrency.processutils."""
+
+import re
+
+from eventlet import greenthread
+from oslo.concurrency import processutils
+import six
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_fake_execute_repliers = []
+_fake_execute_log = []
+
+
+def fake_execute_get_log():
+ return _fake_execute_log
+
+
+def fake_execute_clear_log():
+ global _fake_execute_log
+ _fake_execute_log = []
+
+
+def fake_execute_set_repliers(repliers):
+ """Allows the client to configure replies to commands."""
+ global _fake_execute_repliers
+ _fake_execute_repliers = repliers
+
+
+def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
+ """A reply handler for commands that haven't been added to the reply list.
+
+ Returns empty strings for stdout and stderr.
+
+ """
+ return '', ''
+
+
+def fake_execute(*cmd_parts, **kwargs):
+ """This function stubs out execute.
+
+ It optionally executes a preconfigured function to return expected data.
+
+ """
+ global _fake_execute_repliers
+
+ process_input = kwargs.get('process_input', None)
+ check_exit_code = kwargs.get('check_exit_code', 0)
+ delay_on_retry = kwargs.get('delay_on_retry', True)
+ attempts = kwargs.get('attempts', 1)
+ run_as_root = kwargs.get('run_as_root', False)
+ cmd_str = ' '.join(str(part) for part in cmd_parts)
+
+ LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str)
+ _fake_execute_log.append(cmd_str)
+
+ reply_handler = fake_execute_default_reply_handler
+
+ for fake_replier in _fake_execute_repliers:
+ if re.match(fake_replier[0], cmd_str):
+ reply_handler = fake_replier[1]
+ LOG.debug('Faked command matched %s', fake_replier[0])
+ break
+
+ if isinstance(reply_handler, six.string_types):
+ # If the reply handler is a string, return it as stdout
+ reply = reply_handler, ''
+ else:
+ try:
+ # Alternative is a function, so call it
+ reply = reply_handler(cmd_parts,
+ process_input=process_input,
+ delay_on_retry=delay_on_retry,
+ attempts=attempts,
+ run_as_root=run_as_root,
+ check_exit_code=check_exit_code)
+ except processutils.ProcessExecutionError as e:
+ LOG.debug('Faked command raised an exception %s', e)
+ raise
+
+ LOG.debug("Reply to faked command is stdout='%(stdout)s' "
+ "stderr='%(stderr)s'", {'stdout': reply[0], 'stderr': reply[1]})
+
+ # Replicate the sleep call in the real function
+ greenthread.sleep(0)
+ return reply
+
+
+def stub_out_processutils_execute(stubs):
+ fake_execute_set_repliers([])
+ fake_execute_clear_log()
+ stubs.Set(processutils, 'execute', fake_execute)
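+
+
+# Minimal usage sketch (illustrative only; ``self.stubs`` is assumed to be the
+# stubout fixture that nova.test.TestCase provides):
+#
+#     stub_out_processutils_execute(self.stubs)
+#     fake_execute_set_repliers([(r'^qemu-img', lambda *a, **kw: ('out', ''))])
+#     processutils.execute('qemu-img', 'info', 'disk')
+#     self.assertIn('qemu-img info disk', fake_execute_get_log())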
diff --git a/nova/tests/unit/fake_server_actions.py b/nova/tests/unit/fake_server_actions.py
new file mode 100644
index 0000000000..63047bfbae
--- /dev/null
+++ b/nova/tests/unit/fake_server_actions.py
@@ -0,0 +1,119 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova import db
+
+
+FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8'
+FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a'
+FAKE_ACTION_ID1 = 123
+FAKE_ACTION_ID2 = 456
+
+FAKE_ACTIONS = {
+ FAKE_UUID: {
+ FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1,
+ 'action': 'reboot',
+ 'instance_uuid': FAKE_UUID,
+ 'request_id': FAKE_REQUEST_ID1,
+ 'project_id': '147',
+ 'user_id': '789',
+ 'start_time': datetime.datetime(
+ 2012, 12, 5, 0, 0, 0, 0),
+ 'finish_time': None,
+ 'message': '',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ },
+ FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2,
+ 'action': 'resize',
+ 'instance_uuid': FAKE_UUID,
+ 'request_id': FAKE_REQUEST_ID2,
+ 'user_id': '789',
+ 'project_id': '842',
+ 'start_time': datetime.datetime(
+ 2012, 12, 5, 1, 0, 0, 0),
+ 'finish_time': None,
+ 'message': '',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ }
+ }
+}
+
+FAKE_EVENTS = {
+ FAKE_ACTION_ID1: [{'id': 1,
+ 'action_id': FAKE_ACTION_ID1,
+ 'event': 'schedule',
+ 'start_time': datetime.datetime(
+ 2012, 12, 5, 1, 0, 2, 0),
+ 'finish_time': datetime.datetime(
+ 2012, 12, 5, 1, 2, 0, 0),
+ 'result': 'Success',
+ 'traceback': '',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ },
+ {'id': 2,
+ 'action_id': FAKE_ACTION_ID1,
+ 'event': 'compute_create',
+ 'start_time': datetime.datetime(
+ 2012, 12, 5, 1, 3, 0, 0),
+ 'finish_time': datetime.datetime(
+ 2012, 12, 5, 1, 4, 0, 0),
+ 'result': 'Success',
+ 'traceback': '',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ }
+ ],
+ FAKE_ACTION_ID2: [{'id': 3,
+ 'action_id': FAKE_ACTION_ID2,
+ 'event': 'schedule',
+ 'start_time': datetime.datetime(
+ 2012, 12, 5, 3, 0, 0, 0),
+ 'finish_time': datetime.datetime(
+ 2012, 12, 5, 3, 2, 0, 0),
+ 'result': 'Error',
+ 'traceback': '',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ }
+ ]
+}
+
+
+def fake_action_event_start(*args):
+ return FAKE_EVENTS[FAKE_ACTION_ID1][0]
+
+
+def fake_action_event_finish(*args):
+ return FAKE_EVENTS[FAKE_ACTION_ID1][0]
+
+
+def stub_out_action_events(stubs):
+ stubs.Set(db, 'action_event_start', fake_action_event_start)
+ stubs.Set(db, 'action_event_finish', fake_action_event_finish)
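+
+
+# Usage sketch (illustrative; ``self.stubs`` is assumed to be the test's stubout
+# fixture): after stub_out_action_events(self.stubs), both db.action_event_start()
+# and db.action_event_finish() return the first FAKE_ACTION_ID1 event above,
+# regardless of the arguments passed in.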
diff --git a/nova/tests/unit/fake_utils.py b/nova/tests/unit/fake_utils.py
new file mode 100644
index 0000000000..7a97866d20
--- /dev/null
+++ b/nova/tests/unit/fake_utils.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This modules stubs out functions in nova.utils."""
+
+from nova import utils
+
+
+def stub_out_utils_spawn_n(stubs):
+ """Stubs out spawn_n with a blocking version.
+
+ This aids testing async processes by blocking until they're done.
+ """
+ def no_spawn(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ # NOTE(danms): This is supposed to simulate spawning
+ # of a thread, which would run separate from the parent,
+ # and die silently on error. If we don't catch and discard
+ # any exceptions here, we're not honoring the usual
+ # behavior.
+ pass
+
+ stubs.Set(utils, 'spawn_n', no_spawn)
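+
+
+# Usage sketch (illustrative; ``some_func`` is hypothetical):
+#
+#     stub_out_utils_spawn_n(self.stubs)
+#     utils.spawn_n(some_func, arg)  # now runs inline and swallows exceptions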
diff --git a/nova/tests/unit/fake_volume.py b/nova/tests/unit/fake_volume.py
new file mode 100644
index 0000000000..6fbe560162
--- /dev/null
+++ b/nova/tests/unit/fake_volume.py
@@ -0,0 +1,290 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of a fake volume API."""
+
+import uuid
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('cross_az_attach',
+ 'nova.volume.cinder', group='cinder')
+
+
+class fake_volume():
+ user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
+ instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
+
+ def __init__(self, size, name,
+ description, volume_id, snapshot,
+ volume_type, metadata,
+ availability_zone):
+ snapshot_id = None
+ if snapshot is not None:
+ snapshot_id = snapshot['id']
+ if volume_id is None:
+ volume_id = str(uuid.uuid4())
+ self.vol = {
+ 'created_at': timeutils.utcnow(),
+ 'deleted_at': None,
+ 'updated_at': timeutils.utcnow(),
+ 'uuid': 'WTF',
+ 'deleted': False,
+ 'id': volume_id,
+ 'user_id': self.user_uuid,
+ 'project_id': 'fake-project-id',
+ 'snapshot_id': snapshot_id,
+ 'host': None,
+ 'size': size,
+ 'availability_zone': availability_zone,
+ 'instance_uuid': None,
+ 'mountpoint': None,
+ 'attach_time': timeutils.utcnow(),
+ 'status': 'available',
+ 'attach_status': 'detached',
+ 'scheduled_at': None,
+ 'launched_at': None,
+ 'terminated_at': None,
+ 'display_name': name,
+ 'display_description': description,
+ 'provider_location': 'fake-location',
+ 'provider_auth': 'fake-auth',
+ 'volume_type_id': 99
+ }
+
+ def get(self, key, default=None):
+ return self.vol[key]
+
+ def __setitem__(self, key, value):
+ self.vol[key] = value
+
+ def __getitem__(self, key):
+ return self.vol[key]
+
+
+class fake_snapshot():
+ user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
+ instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
+
+ def __init__(self, volume_id, size, name, desc, id=None):
+ if id is None:
+ id = str(uuid.uuid4())
+ self.snap = {
+ 'created_at': timeutils.utcnow(),
+ 'deleted_at': None,
+ 'updated_at': timeutils.utcnow(),
+ 'uuid': 'WTF',
+ 'deleted': False,
+ 'id': str(id),
+ 'volume_id': volume_id,
+ 'status': 'creating',
+ 'progress': '0%',
+ 'volume_size': 1,
+ 'display_name': name,
+ 'display_description': desc,
+ 'user_id': self.user_uuid,
+ 'project_id': 'fake-project-id'
+ }
+
+ def get(self, key, default=None):
+ return self.snap[key]
+
+ def __setitem__(self, key, value):
+ self.snap[key] = value
+
+ def __getitem__(self, key):
+ return self.snap[key]
+
+
+class API(object):
+ volume_list = []
+ snapshot_list = []
+ _instance = None
+
+ class Singleton:
+ def __init__(self):
+ self.API = None
+
+ def __init__(self):
+ if API._instance is None:
+ API._instance = API.Singleton()
+
+ self._EventHandler_instance = API._instance
+
+ def create(self, context, size, name, description, snapshot=None,
+ volume_type=None, metadata=None, availability_zone=None):
+ v = fake_volume(size, name,
+ description, None,
+ snapshot, volume_type,
+ metadata, availability_zone)
+ self.volume_list.append(v.vol)
+ LOG.info('creating volume %s', v.vol['id'])
+ return v.vol
+
+ def create_with_kwargs(self, context, **kwargs):
+ volume_id = kwargs.get('volume_id', None)
+ v = fake_volume(kwargs['size'],
+ kwargs['name'],
+ kwargs['description'],
+ str(volume_id),
+ None,
+ None,
+ None,
+ None)
+ if kwargs.get('status', None) is not None:
+ v.vol['status'] = kwargs['status']
+ if kwargs['host'] is not None:
+ v.vol['host'] = kwargs['host']
+ if kwargs['attach_status'] is not None:
+ v.vol['attach_status'] = kwargs['attach_status']
+ if kwargs.get('snapshot_id', None) is not None:
+ v.vol['snapshot_id'] = kwargs['snapshot_id']
+
+ self.volume_list.append(v.vol)
+ return v.vol
+
+ def get(self, context, volume_id):
+ if str(volume_id) == '87654321':
+ return {'id': volume_id,
+ 'attach_time': '13:56:24',
+ 'attach_status': 'attached',
+ 'status': 'in-use'}
+
+ for v in self.volume_list:
+ if v['id'] == str(volume_id):
+ return v
+
+ raise exception.VolumeNotFound(volume_id=volume_id)
+
+ def get_all(self, context):
+ return self.volume_list
+
+ def delete(self, context, volume_id):
+ LOG.info('deleting volume %s', volume_id)
+ self.volume_list = [v for v in self.volume_list
+ if v['id'] != volume_id]
+
+ def check_attach(self, context, volume, instance=None):
+ if volume['status'] != 'available':
+ msg = _("status must be available")
+ msg = "%s" % volume
+ raise exception.InvalidVolume(reason=msg)
+ if volume['attach_status'] == 'attached':
+ msg = _("already attached")
+ raise exception.InvalidVolume(reason=msg)
+ if instance and not CONF.cinder.cross_az_attach:
+ if instance['availability_zone'] != volume['availability_zone']:
+ msg = _("Instance and volume not in same availability_zone")
+ raise exception.InvalidVolume(reason=msg)
+
+ def check_detach(self, context, volume):
+ if volume['status'] == "available":
+ msg = _("already detached")
+ raise exception.InvalidVolume(reason=msg)
+
+ def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
+ LOG.info('attaching volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'in-use'
+ volume['mountpoint'] = mountpoint
+ volume['attach_status'] = 'attached'
+ volume['instance_uuid'] = instance_uuid
+ volume['attach_time'] = timeutils.utcnow()
+
+ def fake_set_snapshot_id(self, context, volume, snapshot_id):
+ volume['snapshot_id'] = snapshot_id
+
+ def reset_fake_api(self, context):
+ del self.volume_list[:]
+ del self.snapshot_list[:]
+
+ def detach(self, context, volume_id):
+ LOG.info('detaching volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'available'
+ volume['mountpoint'] = None
+ volume['attach_status'] = 'detached'
+ volume['instance_uuid'] = None
+
+ def initialize_connection(self, context, volume_id, connector):
+ return {'driver_volume_type': 'iscsi', 'data': {}}
+
+ def terminate_connection(self, context, volume_id, connector):
+ return None
+
+ def get_snapshot(self, context, snapshot_id):
+ for snap in self.snapshot_list:
+ if snap['id'] == str(snapshot_id):
+ return snap
+
+ def get_all_snapshots(self, context):
+ return self.snapshot_list
+
+ def create_snapshot(self, context, volume_id, name, description, id=None):
+ volume = self.get(context, volume_id)
+ snapshot = fake_snapshot(volume['id'], volume['size'],
+ name, description, id)
+ self.snapshot_list.append(snapshot.snap)
+ return snapshot.snap
+
+ def create_snapshot_with_kwargs(self, context, **kwargs):
+ snapshot = fake_snapshot(kwargs.get('volume_id'),
+ kwargs.get('volume_size'),
+ kwargs.get('name'),
+ kwargs.get('description'),
+ kwargs.get('snap_id'))
+
+ status = kwargs.get('status', None)
+ snapshot.snap['status'] = status
+ self.snapshot_list.append(snapshot.snap)
+ return snapshot.snap
+
+ def create_snapshot_force(self, context, volume_id,
+ name, description, id=None):
+ volume = self.get(context, volume_id)
+ snapshot = fake_snapshot(volume['id'], volume['size'],
+ name, description, id)
+ self.snapshot_list.append(snapshot.snap)
+ return snapshot.snap
+
+ def delete_snapshot(self, context, snapshot_id):
+ self.snapshot_list = [s for s in self.snapshot_list
+ if s['id'] != snapshot_id]
+
+ def reserve_volume(self, context, volume_id):
+ LOG.info('reserving volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'attaching'
+
+ def unreserve_volume(self, context, volume_id):
+ LOG.info('unreserving volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'available'
+
+ def begin_detaching(self, context, volume_id):
+ LOG.info('begin detaching volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'detaching'
+
+ def roll_detaching(self, context, volume_id):
+ LOG.info('roll detaching volume %s', volume_id)
+ volume = self.get(context, volume_id)
+ volume['status'] = 'in-use'
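+
+
+# Minimal usage sketch (illustrative; run inside a test case, with ``ctx`` and
+# ``instance_uuid`` supplied by the caller):
+#
+#     api = API()
+#     vol = api.create(ctx, 1, 'vol1', 'test volume')
+#     api.attach(ctx, vol['id'], instance_uuid, '/dev/vdb')
+#     self.assertEqual('in-use', api.get(ctx, vol['id'])['status'])
+#     api.reset_fake_api(ctx)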
diff --git a/nova/tests/unit/functional/__init__.py b/nova/tests/unit/functional/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/functional/__init__.py
diff --git a/nova/tests/unit/image/__init__.py b/nova/tests/unit/image/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/image/__init__.py
diff --git a/nova/tests/unit/image/abs.tar.gz b/nova/tests/unit/image/abs.tar.gz
new file mode 100644
index 0000000000..4d39507340
--- /dev/null
+++ b/nova/tests/unit/image/abs.tar.gz
Binary files differ
diff --git a/nova/tests/unit/image/fake.py b/nova/tests/unit/image/fake.py
new file mode 100644
index 0000000000..0292afba60
--- /dev/null
+++ b/nova/tests/unit/image/fake.py
@@ -0,0 +1,257 @@
+# Copyright 2011 Justin Santa Barbara
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of a fake image service."""
+
+import copy
+import datetime
+import uuid
+
+from oslo.config import cfg
+
+from nova.compute import arch
+from nova import exception
+import nova.image.glance
+from nova.openstack.common import log as logging
+
+CONF = cfg.CONF
+CONF.import_opt('null_kernel', 'nova.compute.api')
+LOG = logging.getLogger(__name__)
+
+
+class _FakeImageService(object):
+ """Mock (fake) image service for unit testing."""
+
+ def __init__(self):
+ self.images = {}
+ # NOTE(justinsb): The OpenStack API can't upload an image?
+ # So, make sure we've got one.
+ timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
+
+ image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
+ 'architecture': arch.X86_64}}
+
+ image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'size': '58145823',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
+
+ image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': None,
+ 'disk_format': None,
+ 'size': '83594576',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
+
+ image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'size': '84035174',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
+
+ image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': True,
+ 'container_format': 'ami',
+ 'disk_format': 'ami',
+ 'size': '26360814',
+ 'properties': {'kernel_id':
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'ramdisk_id': None}}
+
+ image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
+ 'name': 'fakeimage6',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'size': '49163826',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
+ 'architecture': arch.X86_64,
+ 'auto_disk_config': 'False'}}
+
+ image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
+ 'name': 'fakeimage7',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'size': '74185822',
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
+ 'architecture': arch.X86_64,
+ 'auto_disk_config': 'True'}}
+
+ self.create(None, image1)
+ self.create(None, image2)
+ self.create(None, image3)
+ self.create(None, image4)
+ self.create(None, image5)
+ self.create(None, image6)
+ self.create(None, image7)
+ self._imagedata = {}
+ super(_FakeImageService, self).__init__()
+
+ # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
+ def detail(self, context, **kwargs):
+ """Return list of detailed image information."""
+ return copy.deepcopy(self.images.values())
+
+ def download(self, context, image_id, dst_path=None, data=None):
+ self.show(context, image_id)
+ if data:
+ data.write(self._imagedata.get(image_id, ''))
+ elif dst_path:
+ with open(dst_path, 'wb') as data:
+ data.write(self._imagedata.get(image_id, ''))
+
+ def show(self, context, image_id, include_locations=False,
+ show_deleted=True):
+ """Get data about specified image.
+
+ Returns a dict containing image data for the given opaque image id.
+
+ """
+ image = self.images.get(str(image_id))
+ if image:
+ return copy.deepcopy(image)
+ LOG.warn('Unable to find image id %s. Have images: %s',
+ image_id, self.images)
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def create(self, context, metadata, data=None):
+ """Store the image data and return the new image id.
+
+ :raises: CouldNotUploadImage if the image already exists.
+
+ """
+ image_id = str(metadata.get('id', uuid.uuid4()))
+ metadata['id'] = image_id
+ if image_id in self.images:
+ raise exception.CouldNotUploadImage(image_id=image_id)
+ self.images[image_id] = copy.deepcopy(metadata)
+ if data:
+ self._imagedata[image_id] = data.read()
+ return self.images[image_id]
+
+ def update(self, context, image_id, metadata, data=None,
+ purge_props=False):
+ """Replace the contents of the given image with the new data.
+
+ :raises: ImageNotFound if the image does not exist.
+
+ """
+ if not self.images.get(image_id):
+ raise exception.ImageNotFound(image_id=image_id)
+ if purge_props:
+ self.images[image_id] = copy.deepcopy(metadata)
+ else:
+ image = self.images[image_id]
+ try:
+ image['properties'].update(metadata.pop('properties'))
+ except KeyError:
+ pass
+ image.update(metadata)
+ return self.images[image_id]
+
+ def delete(self, context, image_id):
+ """Delete the given image.
+
+ :raises: ImageNotFound if the image does not exist.
+
+ """
+ removed = self.images.pop(image_id, None)
+ if not removed:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def get_location(self, context, image_id):
+ if image_id in self.images:
+ return 'fake_location'
+ return None
+
+_fakeImageService = _FakeImageService()
+
+
+def FakeImageService():
+ return _fakeImageService
+
+
+def FakeImageService_reset():
+ global _fakeImageService
+ _fakeImageService = _FakeImageService()
+
+
+def get_valid_image_id():
+ return _fakeImageService.images.keys()[0]
+
+
+def stub_out_image_service(stubs):
+ image_service = FakeImageService()
+ stubs.Set(nova.image.glance, 'get_remote_image_service',
+ lambda x, y: (image_service, y))
+ stubs.Set(nova.image.glance, 'get_default_image_service',
+ lambda: image_service)
+ return image_service
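+
+
+# Usage sketch (illustrative): tests typically call stub_out_image_service(
+# self.stubs) in setUp(), pick an image with get_valid_image_id(), and call
+# FakeImageService_reset() in tearDown() so the module-level singleton starts
+# clean for the next test.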
diff --git a/nova/tests/unit/image/rel.tar.gz b/nova/tests/unit/image/rel.tar.gz
new file mode 100644
index 0000000000..b54f55aa79
--- /dev/null
+++ b/nova/tests/unit/image/rel.tar.gz
Binary files differ
diff --git a/nova/tests/unit/image/test_fake.py b/nova/tests/unit/image/test_fake.py
new file mode 100644
index 0000000000..0f985ee16e
--- /dev/null
+++ b/nova/tests/unit/image/test_fake.py
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import StringIO
+
+from nova import context
+from nova import exception
+from nova import test
+import nova.tests.unit.image.fake
+
+
+class FakeImageServiceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(FakeImageServiceTestCase, self).setUp()
+ self.image_service = nova.tests.unit.image.fake.FakeImageService()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(FakeImageServiceTestCase, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_detail(self):
+ res = self.image_service.detail(self.context)
+ for image in res:
+ keys = set(image.keys())
+ self.assertEqual(keys, set(['id', 'name', 'created_at',
+ 'updated_at', 'deleted_at', 'deleted',
+ 'status', 'is_public', 'properties',
+ 'disk_format', 'container_format',
+ 'size']))
+ self.assertIsInstance(image['created_at'], datetime.datetime)
+ self.assertIsInstance(image['updated_at'], datetime.datetime)
+
+ if not (isinstance(image['deleted_at'], datetime.datetime) or
+ image['deleted_at'] is None):
+ self.fail('image\'s "deleted_at" attribute was neither a '
+ 'datetime object nor None')
+
+ def check_is_bool(image, key):
+ val = image.get(key)
+ if not isinstance(val, bool):
+ self.fail('image\'s "%s" attribute wasn\'t '
+ 'a bool: %r' % (key, val))
+
+ check_is_bool(image, 'deleted')
+ check_is_bool(image, 'is_public')
+
+ def test_show_raises_imagenotfound_for_invalid_id(self):
+ self.assertRaises(exception.ImageNotFound,
+ self.image_service.show,
+ self.context,
+ 'this image does not exist')
+
+ def test_create_adds_id(self):
+ index = self.image_service.detail(self.context)
+ image_count = len(index)
+
+ self.image_service.create(self.context, {})
+
+ index = self.image_service.detail(self.context)
+ self.assertEqual(len(index), image_count + 1)
+
+ self.assertTrue(index[0]['id'])
+
+ def test_create_keeps_id(self):
+ self.image_service.create(self.context, {'id': '34'})
+ self.image_service.show(self.context, '34')
+
+ def test_create_rejects_duplicate_ids(self):
+ self.image_service.create(self.context, {'id': '34'})
+ self.assertRaises(exception.CouldNotUploadImage,
+ self.image_service.create,
+ self.context,
+ {'id': '34'})
+
+ # Make sure there's still one left
+ self.image_service.show(self.context, '34')
+
+ def test_update(self):
+ self.image_service.create(self.context,
+ {'id': '34', 'foo': 'bar'})
+
+ self.image_service.update(self.context, '34',
+ {'id': '34', 'foo': 'baz'})
+
+ img = self.image_service.show(self.context, '34')
+ self.assertEqual(img['foo'], 'baz')
+
+ def test_delete(self):
+ self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
+ self.image_service.delete(self.context, '34')
+ self.assertRaises(exception.NotFound,
+ self.image_service.show,
+ self.context,
+ '34')
+
+ def test_create_then_get(self):
+ blob = 'some data'
+ s1 = StringIO.StringIO(blob)
+ self.image_service.create(self.context,
+ {'id': '32', 'foo': 'bar'},
+ data=s1)
+ s2 = StringIO.StringIO()
+ self.image_service.download(self.context, '32', data=s2)
+ self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
new file mode 100644
index 0000000000..63b4d22e1e
--- /dev/null
+++ b/nova/tests/unit/image/test_glance.py
@@ -0,0 +1,1231 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+import sys
+
+import glanceclient.exc
+import mock
+from oslo.config import cfg
+import testtools
+
+from nova import context
+from nova import exception
+from nova.image import glance
+from nova import test
+from nova import utils
+
+CONF = cfg.CONF
+NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
+
+
+class tzinfo(datetime.tzinfo):
+ @staticmethod
+ def utcoffset(*args, **kwargs):
+ return datetime.timedelta()
+
+NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
+
+
+class TestConversions(test.NoDBTestCase):
+ def test_convert_timestamps_to_datetimes(self):
+ fixture = {'name': None,
+ 'properties': {},
+ 'status': None,
+ 'is_public': None,
+ 'created_at': NOW_GLANCE_FORMAT,
+ 'updated_at': NOW_GLANCE_FORMAT,
+ 'deleted_at': NOW_GLANCE_FORMAT}
+ result = glance._convert_timestamps_to_datetimes(fixture)
+ self.assertEqual(result['created_at'], NOW_DATETIME)
+ self.assertEqual(result['updated_at'], NOW_DATETIME)
+ self.assertEqual(result['deleted_at'], NOW_DATETIME)
+
+ def _test_extracting_missing_attributes(self, include_locations):
+ # Verify behavior from glance objects that are missing attributes
+ # TODO(jaypipes): Find a better way of testing this crappy
+ # glanceclient magic object stuff.
+ class MyFakeGlanceImage(object):
+ def __init__(self, metadata):
+ IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
+ 'updated_at', 'status', 'min_disk',
+ 'min_ram', 'is_public']
+ raw = dict.fromkeys(IMAGE_ATTRIBUTES)
+ raw.update(metadata)
+ self.__dict__['raw'] = raw
+
+ def __getattr__(self, key):
+ try:
+ return self.__dict__['raw'][key]
+ except KeyError:
+ raise AttributeError(key)
+
+ def __setattr__(self, key, value):
+ try:
+ self.__dict__['raw'][key] = value
+ except KeyError:
+ raise AttributeError(key)
+
+ metadata = {
+ 'id': 1,
+ 'created_at': NOW_DATETIME,
+ 'updated_at': NOW_DATETIME,
+ }
+ image = MyFakeGlanceImage(metadata)
+ observed = glance._extract_attributes(
+ image, include_locations=include_locations)
+ expected = {
+ 'id': 1,
+ 'name': None,
+ 'is_public': None,
+ 'size': None,
+ 'min_disk': None,
+ 'min_ram': None,
+ 'disk_format': None,
+ 'container_format': None,
+ 'checksum': None,
+ 'created_at': NOW_DATETIME,
+ 'updated_at': NOW_DATETIME,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'status': None,
+ 'properties': {},
+ 'owner': None
+ }
+ if include_locations:
+ expected['locations'] = None
+ expected['direct_url'] = None
+ self.assertEqual(expected, observed)
+
+ def test_extracting_missing_attributes_include_locations(self):
+ self._test_extracting_missing_attributes(include_locations=True)
+
+ def test_extracting_missing_attributes_exclude_locations(self):
+ self._test_extracting_missing_attributes(include_locations=False)
+
+
+class TestExceptionTranslations(test.NoDBTestCase):
+
+ def test_client_forbidden_to_imagenotauthed(self):
+ in_exc = glanceclient.exc.Forbidden('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
+
+ def test_client_httpforbidden_converts_to_imagenotauthed(self):
+ in_exc = glanceclient.exc.HTTPForbidden('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
+
+ def test_client_notfound_converts_to_imagenotfound(self):
+ in_exc = glanceclient.exc.NotFound('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotFound)
+
+ def test_client_httpnotfound_converts_to_imagenotfound(self):
+ in_exc = glanceclient.exc.HTTPNotFound('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotFound)
+
+
+class TestGlanceSerializer(test.NoDBTestCase):
+ def test_serialize(self):
+ metadata = {'name': 'image1',
+ 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {
+ 'prop1': 'propvalue1',
+ 'mappings': [
+ {'virtual': 'aaa',
+ 'device': 'bbb'},
+ {'virtual': 'xxx',
+ 'device': 'yyy'}],
+ 'block_device_mapping': [
+ {'virtual_device': 'fake',
+ 'device_name': '/dev/fake'},
+ {'virtual_device': 'ephemeral0',
+ 'device_name': '/dev/fake0'}]}}
+
+ converted_expected = {
+ 'name': 'image1',
+ 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {
+ 'prop1': 'propvalue1',
+ 'mappings':
+ '[{"device": "bbb", "virtual": "aaa"}, '
+ '{"device": "yyy", "virtual": "xxx"}]',
+ 'block_device_mapping':
+ '[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
+ '{"virtual_device": "ephemeral0", '
+ '"device_name": "/dev/fake0"}]'}}
+ converted = glance._convert_to_string(metadata)
+ self.assertEqual(converted, converted_expected)
+ self.assertEqual(glance._convert_from_string(converted), metadata)
+
+
+class TestGetImageService(test.NoDBTestCase):
+ @mock.patch.object(glance.GlanceClientWrapper, '__init__',
+ return_value=None)
+ def test_get_remote_service_from_id(self, gcwi_mocked):
+ id_or_uri = '123'
+ _ignored, image_id = glance.get_remote_image_service(
+ mock.sentinel.ctx, id_or_uri)
+ self.assertEqual(id_or_uri, image_id)
+ gcwi_mocked.assert_called_once_with()
+
+ @mock.patch.object(glance.GlanceClientWrapper, '__init__',
+ return_value=None)
+ def test_get_remote_service_from_href(self, gcwi_mocked):
+ id_or_uri = 'http://127.0.0.1/123'
+ _ignored, image_id = glance.get_remote_image_service(
+ mock.sentinel.ctx, id_or_uri)
+ self.assertEqual('123', image_id)
+ gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx,
+ host='127.0.0.1',
+ port=80,
+ use_ssl=False)
+
+
+class TestCreateGlanceClient(test.NoDBTestCase):
+ @mock.patch('nova.utils.is_valid_ipv6')
+ @mock.patch('glanceclient.Client')
+ def test_headers_passed_glanceclient(self, init_mock, ipv6_mock):
+ self.flags(auth_strategy='keystone')
+ ipv6_mock.return_value = False
+ auth_token = 'token'
+ ctx = context.RequestContext('fake', 'fake', auth_token=auth_token)
+ host = 'host4'
+ port = 9295
+ use_ssl = False
+
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ 'identity_headers': {
+ 'X-Auth-Token': 'token',
+ 'X-User-Id': 'fake',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'fake',
+ 'X-Service-Catalog': '[]',
+ 'X-Identity-Status': 'Confirmed'
+ },
+ 'token': 'token'
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
+
+ # Test the version is properly passed to glanceclient.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
+
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ 'identity_headers': {
+ 'X-Auth-Token': 'token',
+ 'X-User-Id': 'fake',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'fake',
+ 'X-Service-Catalog': '[]',
+ 'X-Identity-Status': 'Confirmed'
+ },
+ 'token': 'token'
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl, version=2)
+ init_mock.assert_called_once_with('2', expected_endpoint,
+ **expected_params)
+
+ # Test that non-keystone auth strategy doesn't bother to pass
+ # glanceclient all the Keystone-related headers.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
+
+ self.flags(auth_strategy='non-keystone')
+
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
+
+ # Test that the IPv6 bracketization adapts the endpoint properly.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
+
+ ipv6_mock.return_value = True
+
+ expected_endpoint = 'http://[host4]:9295'
+ expected_params = {
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
+
+
+class TestGlanceClientWrapper(test.NoDBTestCase):
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_static_client_without_retries(self, create_client_mock,
+ sleep_mock):
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+ self.flags(num_retries=0, group='glance')
+
+ ctx = context.RequestContext('fake', 'fake')
+ host = 'host4'
+ port = 9295
+ use_ssl = False
+
+ client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
+ use_ssl=use_ssl)
+ create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
+
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_static_client_with_retries(self, create_client_mock,
+ sleep_mock):
+ self.flags(num_retries=1, group='glance')
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = [
+ glanceclient.exc.ServiceUnavailable,
+ None
+ ]
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+
+ ctx = context.RequestContext('fake', 'fake')
+ host = 'host4'
+ port = 9295
+ use_ssl = False
+
+ client = glance.GlanceClientWrapper(context=ctx,
+ host=host, port=port, use_ssl=use_ssl)
+ client.call(ctx, 1, 'get', 'meow')
+ sleep_mock.assert_called_once_with(1)
+
+ @mock.patch('random.shuffle')
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_default_client_without_retries(self, create_client_mock,
+ sleep_mock, shuffle_mock):
+ api_servers = [
+ 'host1:9292',
+ 'https://host2:9293',
+ 'http://host3:9294'
+ ]
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+
+ shuffle_mock.return_value = api_servers
+ self.flags(num_retries=0, group='glance')
+ self.flags(api_servers=api_servers, group='glance')
+
+ # Here we are testing the behaviour that calling client.call() twice
+ # when there are no retries will cycle through the api_servers and not
+ # sleep (which would be an indication of a retry)
+ ctx = context.RequestContext('fake', 'fake')
+
+ client = glance.GlanceClientWrapper()
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
+
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
+
+ create_client_mock.assert_has_calls(
+ [
+ mock.call(ctx, 'host1', 9292, False, 1),
+ mock.call(ctx, 'host2', 9293, True, 1),
+ ]
+ )
+
+ @mock.patch('random.shuffle')
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_default_client_with_retries(self, create_client_mock,
+ sleep_mock, shuffle_mock):
+ api_servers = [
+ 'host1:9292',
+ 'https://host2:9293',
+ 'http://host3:9294'
+ ]
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = [
+ glanceclient.exc.ServiceUnavailable,
+ None
+ ]
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+
+ self.flags(num_retries=1, group='glance')
+ self.flags(api_servers=api_servers, group='glance')
+
+ ctx = context.RequestContext('fake', 'fake')
+
+ # And here we're testing that if num_retries is not 0, then we attempt
+ # to retry the same connection action against the next client.
+
+ client = glance.GlanceClientWrapper()
+ client.call(ctx, 1, 'get', 'meow')
+
+ create_client_mock.assert_has_calls(
+ [
+ mock.call(ctx, 'host1', 9292, False, 1),
+ mock.call(ctx, 'host2', 9293, True, 1),
+ ]
+ )
+ sleep_mock.assert_called_once_with(1)
+
+ @mock.patch('glanceclient.Client')
+ def test_create_glance_client_with_ssl(self, client_mock):
+ self.flags(ca_file='foo.cert', cert_file='bar.cert',
+ key_file='wut.key', group='ssl')
+ ctxt = mock.sentinel.ctx
+ glance._create_glance_client(ctxt, 'host4', 9295, use_ssl=True)
+ client_mock.assert_called_once_with(
+ '1', 'https://host4:9295', insecure=False, ssl_compression=False,
+ cert_file='bar.cert', key_file='wut.key', cacert='foo.cert')
+
+
+class TestDownloadNoDirectUri(test.NoDBTestCase):
+
+ """Tests the download method of the GlanceImageService when the
+ default of not allowing direct URI transfers is set.
+ """
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_no_data_no_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = mock.sentinel.image_chunks
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id)
+
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertEqual(mock.sentinel.image_chunks, res)
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_data_no_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ data = mock.MagicMock()
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id, data=data)
+
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertIsNone(res)
+ data.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ self.assertFalse(data.close.called)
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_no_data_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertFalse(show_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ writer.close.assert_called_once_with()
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_data_dest_path(self, show_mock, open_mock):
+ # NOTE(jaypipes): This really shouldn't be allowed, but because of the
+ # horrible design of the download() method in GlanceImageService, no
+ # error is raised, and the dst_path is ignored...
+ # TODO(jaypipes): Fix the aforementioned horrible design of
+ # the download() method.
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ data = mock.MagicMock()
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id, data=data)
+
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertIsNone(res)
+ data.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ self.assertFalse(data.close.called)
+
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_file_uri(self, show_mock, get_tran_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
+ }
+ tran_mod = mock.MagicMock()
+ get_tran_mock.return_value = tran_mod
+ client = mock.MagicMock()
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
+ self.assertFalse(client.call.called)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ tran_mod.download.assert_called_once_with(ctx, mock.ANY,
+ mock.sentinel.dst_path,
+ mock.sentinel.loc_meta)
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_exception_fallback(self, show_mock,
+ get_tran_mock,
+ open_mock):
+ # Test that we fall back to downloading to the dst_path
+ # if the download method of the transfer module raised
+ # an exception.
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
+ }
+ tran_mod = mock.MagicMock()
+ tran_mod.download.side_effect = Exception
+ get_tran_mock.return_value = tran_mod
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ tran_mod.download.assert_called_once_with(ctx, mock.ANY,
+ mock.sentinel.dst_path,
+ mock.sentinel.loc_meta)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ # NOTE(jaypipes): log messages call open() in part of the
+ # download path, so here, we just check that the last open()
+ # call was done for the dst_path file descriptor.
+ open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_no_mod_fallback(self, show_mock,
+ get_tran_mock,
+ open_mock):
+ # Test that we fall back to downloading to the dst_path
+ # if no appropriate transfer module is found for the
+ # direct URL scheme.
+ self.flags(allowed_direct_url_schemes=['funky'], group='glance')
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
+ }
+ get_tran_mock.return_value = None
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ # NOTE(jaypipes): log messages call open() in part of the
+ # download path, so here, we just check that the last open()
+ # call was done for the dst_path file descriptor.
+ open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ writer.close.assert_called_once_with()
+
+
+class TestIsImageAvailable(test.NoDBTestCase):
+ """Tests the internal _is_image_available function."""
+
+ class ImageSpecV2(object):
+ visibility = None
+ properties = None
+
+ class ImageSpecV1(object):
+ is_public = None
+ properties = None
+
+ def test_auth_token_override(self):
+ ctx = mock.MagicMock(auth_token=True)
+ img = mock.MagicMock()
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+ self.assertFalse(img.called)
+
+ def test_admin_override(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=True)
+ img = mock.MagicMock()
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+ self.assertFalse(img.called)
+
+ def test_v2_visibility(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False)
+ # We emulate warlock validation that throws an AttributeError
+ # if you try to call is_public on an image model returned by
+ # a call to V2 image.get(). Here, the ImageSpecV2 does not have
+ # an is_public attribute and MagicMock will throw an AttributeError.
+ img = mock.MagicMock(visibility='PUBLIC',
+ spec=TestIsImageAvailable.ImageSpecV2)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ def test_v1_is_public(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False)
+ img = mock.MagicMock(is_public=True,
+ spec=TestIsImageAvailable.ImageSpecV1)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ def test_project_is_owner(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False,
+ project_id='123')
+ props = {
+ 'owner_id': '123'
+ }
+ img = mock.MagicMock(visibility='private', properties=props,
+ spec=TestIsImageAvailable.ImageSpecV2)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ ctx.reset_mock()
+ img = mock.MagicMock(is_public=False, properties=props,
+ spec=TestIsImageAvailable.ImageSpecV1)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ def test_project_context_matches_project_prop(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False,
+ project_id='123')
+ props = {
+ 'project_id': '123'
+ }
+ img = mock.MagicMock(visibility='private', properties=props,
+ spec=TestIsImageAvailable.ImageSpecV2)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ ctx.reset_mock()
+ img = mock.MagicMock(is_public=False, properties=props,
+ spec=TestIsImageAvailable.ImageSpecV1)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ def test_no_user_in_props(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False,
+ project_id='123')
+ props = {
+ }
+ img = mock.MagicMock(visibility='private', properties=props,
+ spec=TestIsImageAvailable.ImageSpecV2)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertFalse(res)
+
+ ctx.reset_mock()
+ img = mock.MagicMock(is_public=False, properties=props,
+ spec=TestIsImageAvailable.ImageSpecV1)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertFalse(res)
+
+ def test_user_matches_context(self):
+ ctx = mock.MagicMock(auth_token=False, is_admin=False,
+ user_id='123')
+ props = {
+ 'user_id': '123'
+ }
+ img = mock.MagicMock(visibility='private', properties=props,
+ spec=TestIsImageAvailable.ImageSpecV2)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+ ctx.reset_mock()
+ img = mock.MagicMock(is_public=False, properties=props,
+ spec=TestIsImageAvailable.ImageSpecV1)
+
+ res = glance._is_image_available(ctx, img)
+ self.assertTrue(res)
+
+
+class TestShow(test.NoDBTestCase):
+
+ """Tests the show method of the GlanceImageService."""
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_show_success(self, is_avail_mock, trans_from_mock):
+ is_avail_mock.return_value = True
+ trans_from_mock.return_value = {'mock': mock.sentinel.trans_from}
+ client = mock.MagicMock()
+ client.call.return_value = {}
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ info = service.show(ctx, mock.sentinel.image_id)
+
+ client.call.assert_called_once_with(ctx, 1, 'get',
+ mock.sentinel.image_id)
+ is_avail_mock.assert_called_once_with(ctx, {})
+ trans_from_mock.assert_called_once_with({}, include_locations=False)
+ self.assertIn('mock', info)
+ self.assertEqual(mock.sentinel.trans_from, info['mock'])
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_show_not_available(self, is_avail_mock, trans_from_mock):
+ is_avail_mock.return_value = False
+ client = mock.MagicMock()
+ client.call.return_value = mock.sentinel.images_0
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+
+ with testtools.ExpectedException(exception.ImageNotFound):
+ service.show(ctx, mock.sentinel.image_id)
+
+ client.call.assert_called_once_with(ctx, 1, 'get',
+ mock.sentinel.image_id)
+ is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
+ self.assertFalse(trans_from_mock.called)
+
+ @mock.patch('nova.image.glance._reraise_translated_image_exception')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_show_client_failure(self, is_avail_mock, trans_from_mock,
+ reraise_mock):
+ raised = exception.ImageNotAuthorized(image_id=123)
+ client = mock.MagicMock()
+ client.call.side_effect = glanceclient.exc.Forbidden
+ ctx = mock.sentinel.ctx
+ reraise_mock.side_effect = raised
+ service = glance.GlanceImageService(client)
+
+ with testtools.ExpectedException(exception.ImageNotAuthorized):
+ service.show(ctx, mock.sentinel.image_id)
+ client.call.assert_called_once_with(ctx, 1, 'get',
+ mock.sentinel.image_id)
+ self.assertFalse(is_avail_mock.called)
+ self.assertFalse(trans_from_mock.called)
+ reraise_mock.assert_called_once_with(mock.sentinel.image_id)
+
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_show_queued_image_without_some_attrs(self, is_avail_mock):
+ is_avail_mock.return_value = True
+ client = mock.MagicMock()
+
+        # Fake image class without disk_format, container_format and name
+        # attributes
+ class fake_image_cls(dict):
+ id = 'b31aa5dd-f07a-4748-8f15-398346887584'
+ deleted = False
+ protected = False
+ min_disk = 0
+ created_at = '2014-05-20T08:16:48'
+ size = 0
+ status = 'queued'
+ is_public = False
+ min_ram = 0
+ owner = '980ec4870033453ead65c0470a78b8a8'
+ updated_at = '2014-05-20T08:16:48'
+ glance_image = fake_image_cls()
+ client.call.return_value = glance_image
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ image_info = service.show(ctx, glance_image.id)
+ client.call.assert_called_once_with(ctx, 1, 'get',
+ glance_image.id)
+ NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
+ 'container_format', 'status', 'id',
+ 'name', 'created_at', 'updated_at',
+ 'deleted', 'deleted_at', 'checksum',
+ 'min_disk', 'min_ram', 'is_public',
+ 'properties'])
+
+ self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_include_locations_success(self, avail_mock, trans_from_mock):
+ locations = [mock.sentinel.loc1]
+ avail_mock.return_value = True
+ trans_from_mock.return_value = {'locations': locations}
+
+ client = mock.Mock()
+ client.call.return_value = mock.sentinel.image
+ service = glance.GlanceImageService(client)
+ ctx = mock.sentinel.ctx
+ image_id = mock.sentinel.image_id
+ info = service.show(ctx, image_id, include_locations=True)
+
+ client.call.assert_called_once_with(ctx, 2, 'get', image_id)
+ avail_mock.assert_called_once_with(ctx, mock.sentinel.image)
+ trans_from_mock.assert_called_once_with(mock.sentinel.image,
+ include_locations=True)
+ self.assertIn('locations', info)
+ self.assertEqual(locations, info['locations'])
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_include_direct_uri_success(self, avail_mock, trans_from_mock):
+ locations = [mock.sentinel.loc1]
+ avail_mock.return_value = True
+ trans_from_mock.return_value = {'locations': locations,
+ 'direct_uri': mock.sentinel.duri}
+
+ client = mock.Mock()
+ client.call.return_value = mock.sentinel.image
+ service = glance.GlanceImageService(client)
+ ctx = mock.sentinel.ctx
+ image_id = mock.sentinel.image_id
+ info = service.show(ctx, image_id, include_locations=True)
+
+ client.call.assert_called_once_with(ctx, 2, 'get', image_id)
+ expected = locations
+ expected.append({'url': mock.sentinel.duri, 'metadata': {}})
+ self.assertIn('locations', info)
+ self.assertEqual(expected, info['locations'])
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_do_not_show_deleted_images(self, is_avail_mock, trans_from_mock):
+ class fake_image_cls(dict):
+ id = 'b31aa5dd-f07a-4748-8f15-398346887584'
+ deleted = True
+
+ glance_image = fake_image_cls()
+ client = mock.MagicMock()
+ client.call.return_value = glance_image
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+
+ with testtools.ExpectedException(exception.ImageNotFound):
+ service.show(ctx, glance_image.id, show_deleted=False)
+
+ client.call.assert_called_once_with(ctx, 1, 'get',
+ glance_image.id)
+ self.assertFalse(is_avail_mock.called)
+ self.assertFalse(trans_from_mock.called)
+
+
+class TestDetail(test.NoDBTestCase):
+
+ """Tests the detail method of the GlanceImageService."""
+
+ @mock.patch('nova.image.glance._extract_query_params')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_detail_success_available(self, is_avail_mock, trans_from_mock,
+ ext_query_mock):
+ params = {}
+ is_avail_mock.return_value = True
+ ext_query_mock.return_value = params
+ trans_from_mock.return_value = mock.sentinel.trans_from
+ client = mock.MagicMock()
+ client.call.return_value = [mock.sentinel.images_0]
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ images = service.detail(ctx, **params)
+
+ client.call.assert_called_once_with(ctx, 1, 'list')
+ is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
+ trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
+ self.assertEqual([mock.sentinel.trans_from], images)
+
+ @mock.patch('nova.image.glance._extract_query_params')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
+ ext_query_mock):
+ params = {}
+ is_avail_mock.return_value = False
+ ext_query_mock.return_value = params
+ trans_from_mock.return_value = mock.sentinel.trans_from
+ client = mock.MagicMock()
+ client.call.return_value = [mock.sentinel.images_0]
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ images = service.detail(ctx, **params)
+
+ client.call.assert_called_once_with(ctx, 1, 'list')
+ is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
+ self.assertFalse(trans_from_mock.called)
+ self.assertEqual([], images)
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_detail_params_passed(self, is_avail_mock, _trans_from_mock):
+ client = mock.MagicMock()
+ client.call.return_value = [mock.sentinel.images_0]
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ service.detail(ctx, page_size=5, limit=10)
+
+ expected_filters = {
+ 'is_public': 'none'
+ }
+ client.call.assert_called_once_with(ctx, 1, 'list',
+ filters=expected_filters,
+ page_size=5,
+ limit=10)
+
+ @mock.patch('nova.image.glance._reraise_translated_exception')
+ @mock.patch('nova.image.glance._extract_query_params')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_detail_client_failure(self, is_avail_mock, trans_from_mock,
+ ext_query_mock, reraise_mock):
+ params = {}
+ ext_query_mock.return_value = params
+ raised = exception.Forbidden()
+ client = mock.MagicMock()
+ client.call.side_effect = glanceclient.exc.Forbidden
+ ctx = mock.sentinel.ctx
+ reraise_mock.side_effect = raised
+ service = glance.GlanceImageService(client)
+
+ with testtools.ExpectedException(exception.Forbidden):
+ service.detail(ctx, **params)
+
+ client.call.assert_called_once_with(ctx, 1, 'list')
+ self.assertFalse(is_avail_mock.called)
+ self.assertFalse(trans_from_mock.called)
+ reraise_mock.assert_called_once_with()
+
+
+class TestCreate(test.NoDBTestCase):
+
+ """Tests the create method of the GlanceImageService."""
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._translate_to_glance')
+ def test_create_success(self, trans_to_mock, trans_from_mock):
+ translated = {
+ 'image_id': mock.sentinel.image_id
+ }
+ trans_to_mock.return_value = translated
+ trans_from_mock.return_value = mock.sentinel.trans_from
+ image_mock = mock.MagicMock(spec=dict)
+ client = mock.MagicMock()
+ client.call.return_value = mock.sentinel.image_meta
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ image_meta = service.create(ctx, image_mock)
+
+ trans_to_mock.assert_called_once_with(image_mock)
+ client.call.assert_called_once_with(ctx, 1, 'create',
+ image_id=mock.sentinel.image_id)
+ trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
+
+ self.assertEqual(mock.sentinel.trans_from, image_meta)
+
+        # Now verify that if we supply image data to the call, the
+        # client is also called with the data kwarg
+ client.reset_mock()
+ image_meta = service.create(ctx, image_mock, data=mock.sentinel.data)
+
+ client.call.assert_called_once_with(ctx, 1, 'create',
+ image_id=mock.sentinel.image_id,
+ data=mock.sentinel.data)
+
+ @mock.patch('nova.image.glance._reraise_translated_exception')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._translate_to_glance')
+ def test_create_client_failure(self, trans_to_mock, trans_from_mock,
+ reraise_mock):
+ translated = {}
+ trans_to_mock.return_value = translated
+ image_mock = mock.MagicMock(spec=dict)
+ raised = exception.Invalid()
+ client = mock.MagicMock()
+ client.call.side_effect = glanceclient.exc.BadRequest
+ ctx = mock.sentinel.ctx
+ reraise_mock.side_effect = raised
+ service = glance.GlanceImageService(client)
+
+ self.assertRaises(exception.Invalid, service.create, ctx, image_mock)
+ trans_to_mock.assert_called_once_with(image_mock)
+ self.assertFalse(trans_from_mock.called)
+
+
+class TestUpdate(test.NoDBTestCase):
+
+ """Tests the update method of the GlanceImageService."""
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._translate_to_glance')
+ def test_update_success(self, trans_to_mock, trans_from_mock):
+ translated = {
+ 'id': mock.sentinel.image_id,
+ 'name': mock.sentinel.name
+ }
+ trans_to_mock.return_value = translated
+ trans_from_mock.return_value = mock.sentinel.trans_from
+ image_mock = mock.MagicMock(spec=dict)
+ client = mock.MagicMock()
+ client.call.return_value = mock.sentinel.image_meta
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ image_meta = service.update(ctx, mock.sentinel.image_id, image_mock)
+
+ trans_to_mock.assert_called_once_with(image_mock)
+ # Verify that the 'id' element has been removed as a kwarg to
+ # the call to glanceclient's update (since the image ID is
+ # supplied as a positional arg), and that the
+ # purge_props default is True.
+ client.call.assert_called_once_with(ctx, 1, 'update',
+ mock.sentinel.image_id,
+ name=mock.sentinel.name,
+ purge_props=True)
+ trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
+ self.assertEqual(mock.sentinel.trans_from, image_meta)
+
+        # Now verify that if we supply image data to the call, the
+        # client is also called with the data kwarg
+ client.reset_mock()
+ image_meta = service.update(ctx, mock.sentinel.image_id,
+ image_mock, data=mock.sentinel.data)
+
+ client.call.assert_called_once_with(ctx, 1, 'update',
+ mock.sentinel.image_id,
+ name=mock.sentinel.name,
+ purge_props=True,
+ data=mock.sentinel.data)
+
+ @mock.patch('nova.image.glance._reraise_translated_image_exception')
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._translate_to_glance')
+ def test_update_client_failure(self, trans_to_mock, trans_from_mock,
+ reraise_mock):
+ translated = {
+ 'name': mock.sentinel.name
+ }
+ trans_to_mock.return_value = translated
+ trans_from_mock.return_value = mock.sentinel.trans_from
+ image_mock = mock.MagicMock(spec=dict)
+ raised = exception.ImageNotAuthorized(image_id=123)
+ client = mock.MagicMock()
+ client.call.side_effect = glanceclient.exc.Forbidden
+ ctx = mock.sentinel.ctx
+ reraise_mock.side_effect = raised
+ service = glance.GlanceImageService(client)
+
+ self.assertRaises(exception.ImageNotAuthorized,
+ service.update, ctx, mock.sentinel.image_id,
+ image_mock)
+ client.call.assert_called_once_with(ctx, 1, 'update',
+ mock.sentinel.image_id,
+ purge_props=True,
+ name=mock.sentinel.name)
+ self.assertFalse(trans_from_mock.called)
+ reraise_mock.assert_called_once_with(mock.sentinel.image_id)
+
+
+class TestDelete(test.NoDBTestCase):
+
+ """Tests the delete method of the GlanceImageService."""
+
+ def test_delete_success(self):
+ client = mock.MagicMock()
+ client.call.return_value = True
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ service.delete(ctx, mock.sentinel.image_id)
+ client.call.assert_called_once_with(ctx, 1, 'delete',
+ mock.sentinel.image_id)
+
+ def test_delete_client_failure(self):
+ client = mock.MagicMock()
+ client.call.side_effect = glanceclient.exc.NotFound
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ self.assertRaises(exception.ImageNotFound, service.delete, ctx,
+ mock.sentinel.image_id)
+
+
+class TestGlanceUrl(test.NoDBTestCase):
+
+ def test_generate_glance_http_url(self):
+ generated_url = glance.generate_glance_url()
+ glance_host = CONF.glance.host
+        # IPv6 addresses need to be wrapped in '[]', e.g. '::1' -> '[::1]'
+ if utils.is_valid_ipv6(glance_host):
+ glance_host = '[%s]' % glance_host
+ http_url = "http://%s:%d" % (glance_host, CONF.glance.port)
+ self.assertEqual(generated_url, http_url)
+
+ def test_generate_glance_https_url(self):
+ self.flags(protocol="https", group='glance')
+ generated_url = glance.generate_glance_url()
+ glance_host = CONF.glance.host
+        # IPv6 addresses need to be wrapped in '[]', e.g. '::1' -> '[::1]'
+ if utils.is_valid_ipv6(glance_host):
+ glance_host = '[%s]' % glance_host
+ https_url = "https://%s:%d" % (glance_host, CONF.glance.port)
+ self.assertEqual(generated_url, https_url)
+
+
+class TestGlanceApiServers(test.NoDBTestCase):
+
+ def test_get_ipv4_api_servers(self):
+ self.flags(api_servers=['10.0.1.1:9292',
+ 'https://10.0.0.1:9293',
+ 'http://10.0.2.2:9294'], group='glance')
+ glance_host = ['10.0.1.1', '10.0.0.1',
+ '10.0.2.2']
+ api_servers = glance.get_api_servers()
+ i = 0
+ for server in api_servers:
+ i += 1
+ self.assertIn(server[0], glance_host)
+ if i > 2:
+ break
+
+    # Python 2.6 cannot parse IPv6 addresses correctly
+ @testtools.skipIf(sys.version_info < (2, 7), "py27 or greater only")
+ def test_get_ipv6_api_servers(self):
+ self.flags(api_servers=['[2001:2012:1:f101::1]:9292',
+ 'https://[2010:2013:1:f122::1]:9293',
+ 'http://[2001:2011:1:f111::1]:9294'],
+ group='glance')
+ glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
+ '2001:2011:1:f111::1']
+ api_servers = glance.get_api_servers()
+ i = 0
+ for server in api_servers:
+ i += 1
+ self.assertIn(server[0], glance_host)
+ if i > 2:
+ break
+
+
+class TestUpdateGlanceImage(test.NoDBTestCase):
+ @mock.patch('nova.image.glance.GlanceImageService')
+ def test_start(self, mock_glance_image_service):
+ consumer = glance.UpdateGlanceImage(
+ 'context', 'id', 'metadata', 'stream')
+
+ with mock.patch.object(glance, 'get_remote_image_service') as a_mock:
+ a_mock.return_value = (mock_glance_image_service, 'image_id')
+
+ consumer.start()
+ mock_glance_image_service.update.assert_called_with(
+ 'context', 'image_id', 'metadata', 'stream', purge_props=False)
diff --git a/nova/tests/unit/image/test_s3.py b/nova/tests/unit/image/test_s3.py
new file mode 100644
index 0000000000..d9ef08d3fe
--- /dev/null
+++ b/nova/tests/unit/image/test_s3.py
@@ -0,0 +1,267 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import binascii
+import os
+import tempfile
+
+import eventlet
+import fixtures
+import mox
+
+from nova.api.ec2 import ec2utils
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import s3
+from nova import test
+from nova.tests.unit.image import fake
+
+
+ami_manifest_xml = """<?xml version="1.0" ?>
+<manifest>
+ <version>2011-06-17</version>
+ <bundler>
+ <name>test-s3</name>
+ <version>0</version>
+ <release>0</release>
+ </bundler>
+ <machine_configuration>
+ <architecture>x86_64</architecture>
+ <block_device_mapping>
+ <mapping>
+ <virtual>ami</virtual>
+ <device>sda1</device>
+ </mapping>
+ <mapping>
+ <virtual>root</virtual>
+ <device>/dev/sda1</device>
+ </mapping>
+ <mapping>
+ <virtual>ephemeral0</virtual>
+ <device>sda2</device>
+ </mapping>
+ <mapping>
+ <virtual>swap</virtual>
+ <device>sda3</device>
+ </mapping>
+ </block_device_mapping>
+ <kernel_id>aki-00000001</kernel_id>
+ <ramdisk_id>ari-00000001</ramdisk_id>
+ </machine_configuration>
+</manifest>
+"""
+
+file_manifest_xml = """<?xml version="1.0" ?>
+<manifest>
+ <image>
+ <ec2_encrypted_key>foo</ec2_encrypted_key>
+ <user_encrypted_key>foo</user_encrypted_key>
+ <ec2_encrypted_iv>foo</ec2_encrypted_iv>
+ <parts count="1">
+ <part index="0">
+ <filename>foo</filename>
+ </part>
+ </parts>
+ </image>
+</manifest>
+"""
+
+
+class TestS3ImageService(test.TestCase):
+ def setUp(self):
+ super(TestS3ImageService, self).setUp()
+ self.context = context.RequestContext(None, None)
+ self.useFixture(fixtures.FakeLogger('boto'))
+
+        # Set up three fixtures for the show() tests; they should be
+        # assigned ids '1', '2' and '3'
+ db.s3_image_create(self.context,
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6')
+ db.s3_image_create(self.context,
+ 'a2459075-d96c-40d5-893e-577ff92e721c')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ fake.stub_out_image_service(self.stubs)
+ self.image_service = s3.S3ImageService()
+ ec2utils.reset_cache()
+
+ def tearDown(self):
+ super(TestS3ImageService, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def _assertEqualList(self, list0, list1, keys):
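+        # Compares two lists of dicts that may be ordered differently:
+        # entries are matched on keys[0], and every key in `keys` must
+        # then agree between the matched entries.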
+ self.assertEqual(len(list0), len(list1))
+ key = keys[0]
+ for x in list0:
+ self.assertEqual(len(x), len(keys))
+ self.assertIn(key, x)
+ for y in list1:
+ self.assertIn(key, y)
+ if x[key] == y[key]:
+ for k in keys:
+ self.assertEqual(x[k], y[k])
+
+ def test_show_cannot_use_uuid(self):
+ self.assertRaises(exception.ImageNotFound,
+ self.image_service.show, self.context,
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6')
+
+ def test_show_translates_correctly(self):
+ self.image_service.show(self.context, '1')
+
+ def test_show_translates_image_state_correctly(self):
+ def my_fake_show(self, context, image_id, **kwargs):
+ fake_state_map = {
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
+ 'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
+ return {'id': image_id,
+ 'name': 'fakeimage123456',
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'properties': {'image_state': fake_state_map[image_id]}}
+
+        # Override part of the fake image service just for this test so
+        # we can set image_state to various values and check that
+        # S3ImageService does the correct mapping for us. We can't put
+        # fake bad or pending states in the real fake image service
+        # because that would cause other tests to fail.
+ self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
+ ret_image = self.image_service.show(self.context, '1')
+ self.assertEqual(ret_image['properties']['image_state'], 'pending')
+ ret_image = self.image_service.show(self.context, '2')
+ self.assertEqual(ret_image['properties']['image_state'], 'failed')
+ ret_image = self.image_service.show(self.context, '3')
+ self.assertEqual(ret_image['properties']['image_state'], 'available')
+
+ def test_detail(self):
+ self.image_service.detail(self.context)
+
+ def test_s3_create(self):
+ metadata = {'properties': {
+ 'root_device_name': '/dev/sda1',
+ 'block_device_mapping': [
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 'snap-12345678',
+ 'delete_on_termination': True},
+ {'device_name': '/dev/sda2',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/sdb0',
+ 'no_device': True}]}}
+ _manifest, image, image_uuid = self.image_service._s3_parse_manifest(
+ self.context, metadata, ami_manifest_xml)
+
+ ret_image = self.image_service.show(self.context, image['id'])
+ self.assertIn('properties', ret_image)
+ properties = ret_image['properties']
+
+ self.assertIn('mappings', properties)
+ mappings = properties['mappings']
+ expected_mappings = [
+ {"device": "sda1", "virtual": "ami"},
+ {"device": "/dev/sda1", "virtual": "root"},
+ {"device": "sda2", "virtual": "ephemeral0"},
+ {"device": "sda3", "virtual": "swap"}]
+ self._assertEqualList(mappings, expected_mappings,
+ ['device', 'virtual'])
+
+ self.assertIn('block_device_mapping', properties)
+ block_device_mapping = properties['block_device_mapping']
+ expected_bdm = [
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 'snap-12345678',
+ 'delete_on_termination': True},
+ {'device_name': '/dev/sda2',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/sdb0',
+ 'no_device': True}]
+ self.assertEqual(block_device_mapping, expected_bdm)
+
+ def _initialize_mocks(self):
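+        # A single MockAnything stands in for the S3 connection, its
+        # bucket and key lookups, and the image service helpers stubbed
+        # below; the expected calls are recorded in order and armed with
+        # ReplayAll().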
+ handle, tempf = tempfile.mkstemp(dir='/tmp')
+ ignore = mox.IgnoreArg()
+ mockobj = self.mox.CreateMockAnything()
+ self.stubs.Set(self.image_service, '_conn', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_bucket', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_key', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
+ mockobj().AndReturn(file_manifest_xml)
+ self.stubs.Set(self.image_service, '_download_file', mockobj)
+ mockobj(ignore, ignore, ignore).AndReturn(tempf)
+ self.stubs.Set(binascii, 'a2b_hex', mockobj)
+ mockobj(ignore).AndReturn('foo')
+ mockobj(ignore).AndReturn('foo')
+ self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
+ mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
+ self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
+ mockobj(ignore, ignore).AndReturn(tempf)
+ self.mox.ReplayAll()
+
+ def test_s3_create_image_locations(self):
+ image_location_1 = 'testbucket_1/test.img.manifest.xml'
+ # Use another location that starts with a '/'
+ image_location_2 = '/testbucket_2/test.img.manifest.xml'
+
+ metadata = [{'properties': {'image_location': image_location_1}},
+ {'properties': {'image_location': image_location_2}}]
+
+ for mdata in metadata:
+ self._initialize_mocks()
+ image = self.image_service._s3_create(self.context, mdata)
+ eventlet.sleep()
+ translated = self.image_service._translate_id_to_uuid(self.context,
+ image)
+ uuid = translated['id']
+ image_service = fake.FakeImageService()
+ updated_image = image_service.update(self.context, uuid,
+ {'properties': {'image_state': 'available'}},
+ purge_props=False)
+ self.assertEqual(updated_image['properties']['image_state'],
+ 'available')
+
+ def test_s3_create_is_public(self):
+ self._initialize_mocks()
+ metadata = {'properties': {
+ 'image_location': 'mybucket/my.img.manifest.xml'},
+ 'name': 'mybucket/my.img'}
+ img = self.image_service._s3_create(self.context, metadata)
+ eventlet.sleep()
+ translated = self.image_service._translate_id_to_uuid(self.context,
+ img)
+ uuid = translated['id']
+ image_service = fake.FakeImageService()
+ updated_image = image_service.update(self.context, uuid,
+ {'is_public': True}, purge_props=False)
+ self.assertTrue(updated_image['is_public'])
+ self.assertEqual(updated_image['status'], 'active')
+ self.assertEqual(updated_image['properties']['image_state'],
+ 'available')
+
+ def test_s3_malicious_tarballs(self):
+ self.assertRaises(exception.NovaException,
+ self.image_service._test_for_malicious_tarball,
+ "/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
+ self.assertRaises(exception.NovaException,
+ self.image_service._test_for_malicious_tarball,
+ "/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
diff --git a/nova/tests/unit/image/test_transfer_modules.py b/nova/tests/unit/image/test_transfer_modules.py
new file mode 100644
index 0000000000..51920c36aa
--- /dev/null
+++ b/nova/tests/unit/image/test_transfer_modules.py
@@ -0,0 +1,101 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urlparse
+
+import mock
+
+from nova import exception
+from nova.image.download import file as tm_file
+from nova import test
+
+
+class TestFileTransferModule(test.NoDBTestCase):
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_success(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+ url = 'file:///gluster/my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': fs_id,
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ # NOTE(Jbresnah) The following options must be added after the module
+ # has added the specific groups.
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
+ tm.download(mock.sentinel.ctx, url_parts, dst_file, loc_meta)
+ copy_mock.assert_called_once_with('/gluster/my/image/path', dst_file)
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_mismatched_mountpoint(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+        # The URL deliberately omits the '/gluster' mountpoint prefix, so
+        # the download must fail with a metadata error
+ url = 'file:///my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': fs_id,
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
+ self.assertRaises(exception.ImageDownloadModuleMetaDataError,
+ tm.download, mock.sentinel.ctx, url_parts,
+ dst_file, loc_meta)
+ self.assertFalse(copy_mock.called)
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_mismatched_filesystem(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+        # The location metadata id ('funky') does not match the configured
+        # filesystem id, so the download module must reject the transfer
+ url = 'file:///my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': 'funky',
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
+ self.assertRaises(exception.ImageDownloadModuleError,
+ tm.download, mock.sentinel.ctx, url_parts,
+ dst_file, loc_meta)
+ self.assertFalse(copy_mock.called)
diff --git a/nova/tests/unit/image_fixtures.py b/nova/tests/unit/image_fixtures.py
new file mode 100644
index 0000000000..9ab09b989a
--- /dev/null
+++ b/nova/tests/unit/image_fixtures.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+# nova.image.glance._translate_from_glance() returns datetime
+# objects, not strings.
+NOW_DATE = datetime.datetime(2010, 10, 11, 10, 30, 22)
+
+
+def get_image_fixtures():
+    """Returns a list of image fixture dicts for use in unit tests.
+
+    Returns a list of dicts representing images/snapshots of varying statuses
+ that would be returned from a call to
+ `glanceclient.client.Client.images.list`. The IDs of the images returned
+ start at 123 and go to 131, with the following brief summary of image
+ attributes:
+
+    | ID   Type          Status           Notes
+    | ----------------------------------------------------------
+    | 123  Public image  active
+    | 124  Snapshot      queued
+    | 125  Snapshot      saving
+    | 126  Snapshot      active
+    | 127  Snapshot      killed
+    | 128  Snapshot      deleted
+    | 129  Snapshot      pending_delete
+    | 130  Public image  active           Has no name
+    | 131  Public image  active           Owned by 'authorized_fake'
+
+ """
+
+ image_id = 123
+
+ fixtures = []
+
+ def add_fixture(**kwargs):
+ kwargs.update(created_at=NOW_DATE,
+ updated_at=NOW_DATE)
+ fixtures.append(kwargs)
+
+ # Public image
+ add_fixture(id=str(image_id), name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="128", min_disk="10", size='25165824')
+ image_id += 1
+
+ # Snapshot for User 1
+ uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
+ snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'}
+ for status in ('queued', 'saving', 'active', 'killed',
+ 'deleted', 'pending_delete'):
+ deleted = False if status != 'deleted' else True
+ deleted_at = NOW_DATE if deleted else None
+
+ add_fixture(id=str(image_id), name='%s snapshot' % status,
+ is_public=False, status=status,
+ properties=snapshot_properties, size='25165824',
+ deleted=deleted, deleted_at=deleted_at)
+ image_id += 1
+
+ # Image without a name
+ add_fixture(id=str(image_id), is_public=True, status='active',
+ properties={})
+ # Image for permission tests
+ image_id += 1
+ add_fixture(id=str(image_id), is_public=True, status='active',
+ properties={}, owner='authorized_fake')
+
+ return fixtures
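+
+
+# Usage sketch (illustrative only, not referenced by the test suite): tests
+# typically filter these fixtures by status or visibility before stubbing
+# them into a fake image service.
+def example_active_image_fixtures():
+    """Return only the fixtures whose status is 'active' (sketch only)."""
+    return [image for image in get_image_fixtures()
+            if image['status'] == 'active']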
diff --git a/nova/tests/unit/integrated/__init__.py b/nova/tests/unit/integrated/__init__.py
new file mode 100644
index 0000000000..16b4b921b7
--- /dev/null
+++ b/nova/tests/unit/integrated/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`integrated` -- Tests whole systems, using mock services where needed
+===========================================================================
+"""
diff --git a/nova/tests/unit/integrated/api/__init__.py b/nova/tests/unit/integrated/api/__init__.py
new file mode 100644
index 0000000000..6168280c24
--- /dev/null
+++ b/nova/tests/unit/integrated/api/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`api` -- OpenStack API client, for testing rather than production
+===========================================================================
+"""
diff --git a/nova/tests/unit/integrated/api/client.py b/nova/tests/unit/integrated/api/client.py
new file mode 100644
index 0000000000..733592ec26
--- /dev/null
+++ b/nova/tests/unit/integrated/api/client.py
@@ -0,0 +1,304 @@
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from oslo.serialization import jsonutils
+import requests
+
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova.tests.unit.image import fake
+
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenStackApiException(Exception):
+ def __init__(self, message=None, response=None):
+ self.response = response
+ if not message:
+ message = 'Unspecified error'
+
+ if response:
+ _status = response.status_code
+ _body = response.content
+
+ message = (_('%(message)s\nStatus Code: %(_status)s\n'
+ 'Body: %(_body)s') %
+ {'message': message, '_status': _status,
+ '_body': _body})
+
+ super(OpenStackApiException, self).__init__(message)
+
+
+class OpenStackApiAuthenticationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authentication error")
+ super(OpenStackApiAuthenticationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiAuthorizationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authorization error")
+ super(OpenStackApiAuthorizationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiNotFoundException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Item not found")
+ super(OpenStackApiNotFoundException, self).__init__(message, response)
+
+
+class TestOpenStackClient(object):
+ """Simple OpenStack API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing
+
+ """
+
+ def __init__(self, auth_user, auth_key, auth_uri):
+ super(TestOpenStackClient, self).__init__()
+ self.auth_result = None
+ self.auth_user = auth_user
+ self.auth_key = auth_key
+ self.auth_uri = auth_uri
+ # default project_id
+ self.project_id = 'openstack'
+
+ def request(self, url, method='GET', body=None, headers=None):
+ _headers = {'Content-Type': 'application/json'}
+ _headers.update(headers or {})
+
+ response = requests.request(method, url, data=body, headers=_headers)
+ return response
+
+ def _authenticate(self):
+ if self.auth_result:
+ return self.auth_result
+
+ auth_uri = self.auth_uri
+ headers = {'X-Auth-User': self.auth_user,
+ 'X-Auth-Key': self.auth_key,
+ 'X-Auth-Project-Id': self.project_id}
+ response = self.request(auth_uri,
+ headers=headers)
+
+ http_status = response.status_code
+ LOG.debug("%(auth_uri)s => code %(http_status)s",
+ {'auth_uri': auth_uri, 'http_status': http_status})
+
+ if http_status == 401:
+ raise OpenStackApiAuthenticationException(response=response)
+
+ self.auth_result = response.headers
+ return self.auth_result
+
+ def api_request(self, relative_uri, check_response_status=None,
+ strip_version=False, **kwargs):
+ auth_result = self._authenticate()
+
+ # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
+ base_uri = auth_result['x-server-management-url']
+ if strip_version:
+ # NOTE(vish): cut out version number and tenant_id
+ base_uri = '/'.join(base_uri.split('/', 3)[:-1])
+
+ full_uri = '%s/%s' % (base_uri, relative_uri)
+
+ headers = kwargs.setdefault('headers', {})
+ headers['X-Auth-Token'] = auth_result['x-auth-token']
+
+ response = self.request(full_uri, **kwargs)
+
+ http_status = response.status_code
+ LOG.debug("%(relative_uri)s => code %(http_status)s",
+ {'relative_uri': relative_uri, 'http_status': http_status})
+
+ if check_response_status:
+ if http_status not in check_response_status:
+ if http_status == 404:
+ raise OpenStackApiNotFoundException(response=response)
+ elif http_status == 401:
+ raise OpenStackApiAuthorizationException(response=response)
+ else:
+ raise OpenStackApiException(
+ message=_("Unexpected status code"),
+ response=response)
+
+ return response
+
+ def _decode_json(self, response):
+ body = response.content
+ LOG.debug("Decoding JSON: %s", body)
+ if body:
+ return jsonutils.loads(body)
+ else:
+ return ""
+
+ def api_get(self, relative_uri, **kwargs):
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_post(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'POST'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = jsonutils.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200, 202])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_put(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'PUT'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = jsonutils.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200, 202, 204])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_delete(self, relative_uri, **kwargs):
+ kwargs['method'] = 'DELETE'
+ kwargs.setdefault('check_response_status', [200, 202, 204])
+ return self.api_request(relative_uri, **kwargs)
+
+ def get_server(self, server_id):
+ return self.api_get('/servers/%s' % server_id)['server']
+
+ def get_servers(self, detail=True, search_opts=None):
+ rel_url = '/servers/detail' if detail else '/servers'
+
+ if search_opts is not None:
+ qparams = {}
+ for opt, val in search_opts.iteritems():
+ qparams[opt] = val
+ if qparams:
+ query_string = "?%s" % urllib.urlencode(qparams)
+ rel_url += query_string
+ return self.api_get(rel_url)['servers']
+
+ def post_server(self, server):
+ response = self.api_post('/servers', server)
+ if 'reservation_id' in response:
+ return response
+ else:
+ return response['server']
+
+ def put_server(self, server_id, server):
+ return self.api_put('/servers/%s' % server_id, server)
+
+ def post_server_action(self, server_id, data):
+ return self.api_post('/servers/%s/action' % server_id, data)
+
+ def delete_server(self, server_id):
+ return self.api_delete('/servers/%s' % server_id)
+
+ def get_image(self, image_id):
+ return self.api_get('/images/%s' % image_id)['image']
+
+ def get_images(self, detail=True):
+ rel_url = '/images/detail' if detail else '/images'
+ return self.api_get(rel_url)['images']
+
+ def post_image(self, image):
+ return self.api_post('/images', image)['image']
+
+ def delete_image(self, image_id):
+ return self.api_delete('/images/%s' % image_id)
+
+ def get_flavor(self, flavor_id):
+ return self.api_get('/flavors/%s' % flavor_id)['flavor']
+
+ def get_flavors(self, detail=True):
+ rel_url = '/flavors/detail' if detail else '/flavors'
+ return self.api_get(rel_url)['flavors']
+
+ def post_flavor(self, flavor):
+ return self.api_post('/flavors', flavor)['flavor']
+
+ def delete_flavor(self, flavor_id):
+ return self.api_delete('/flavors/%s' % flavor_id)
+
+ def get_volume(self, volume_id):
+ return self.api_get('/volumes/%s' % volume_id)['volume']
+
+ def get_volumes(self, detail=True):
+ rel_url = '/volumes/detail' if detail else '/volumes'
+ return self.api_get(rel_url)['volumes']
+
+ def post_volume(self, volume):
+ return self.api_post('/volumes', volume)['volume']
+
+ def delete_volume(self, volume_id):
+ return self.api_delete('/volumes/%s' % volume_id)
+
+ def get_server_volume(self, server_id, attachment_id):
+ return self.api_get('/servers/%s/os-volume_attachments/%s' %
+ (server_id, attachment_id))['volumeAttachment']
+
+ def get_server_volumes(self, server_id):
+ return self.api_get('/servers/%s/os-volume_attachments' %
+ (server_id))['volumeAttachments']
+
+ def post_server_volume(self, server_id, volume_attachment):
+ return self.api_post('/servers/%s/os-volume_attachments' %
+ (server_id), volume_attachment
+ )['volumeAttachment']
+
+ def delete_server_volume(self, server_id, attachment_id):
+ return self.api_delete('/servers/%s/os-volume_attachments/%s' %
+ (server_id, attachment_id))
+
+
+class TestOpenStackClientV3(TestOpenStackClient):
+ """Simple OpenStack v3 API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing.
+
+    Note that the V3 API does not have an image API, so it is not
+    possible to query it for image information. Instead we access the
+    fake image service used by the unit tests directly.
+
+ """
+
+ def get_image(self, image_id):
+ return fake._fakeImageService.show(None, image_id)
+
+ def get_images(self, detail=True):
+ return fake._fakeImageService.detail(None)
+
+ def post_image(self, image):
+ raise NotImplementedError
+
+ def delete_image(self, image_id):
+ return fake._fakeImageService.delete(None, image_id)
+
+
+class TestOpenStackClientV3Mixin(object):
+ def _get_test_client(self):
+ return TestOpenStackClientV3('fake', 'fake', self.auth_url)
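+
+
+# Usage sketch (illustrative only; the credentials, auth URL and image id
+# below are placeholders rather than values used elsewhere in the tree):
+def example_boot_and_list_servers(auth_url):
+    """Boot a minimal server and list servers via the test client."""
+    client = TestOpenStackClient('fake', 'fake', auth_url)
+    client.post_server({'server': {'name': 'example-server',
+                                   'imageRef': 'example-image-id',
+                                   'flavorRef': '1'}})
+    return client.get_servers(detail=False)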
diff --git a/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
new file mode 100644
index 0000000000..b9744ab2c7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addFixedIp":{
+ "networkId": %(networkId)s
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
new file mode 100644
index 0000000000..ad11129129
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
@@ -0,0 +1,3 @@
+<addFixedIp>
+ <networkId>%(networkId)s</networkId>
+</addFixedIp>
diff --git a/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
new file mode 100644
index 0000000000..7367e1242c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeFixedIp":{
+ "address": "%(ip)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
new file mode 100644
index 0000000000..10b722220f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
@@ -0,0 +1,3 @@
+<removeFixedIp>
+ <address>%(ip)s</address>
+</removeFixedIp>
diff --git a/nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
new file mode 100644
index 0000000000..72ff4448b8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
new file mode 100644
index 0000000000..bf82d296d1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(image_id)s" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
new file mode 100644
index 0000000000..62f02287f8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
@@ -0,0 +1,214 @@
+{
+ "images": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-DCF:diskConfig": "MANUAL",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "%(id)s",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
new file mode 100644
index 0000000000..b4213312d7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s" OS-DCF:diskConfig="MANUAL">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">%(id)s</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(id)s" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
new file mode 100644
index 0000000000..37c8e57dba
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
new file mode 100644
index 0000000000..36b53957c6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
new file mode 100644
index 0000000000..b239818a8a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rebuild": {
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
new file mode 100644
index 0000000000..93bfb0d4e9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ OS-DCF:diskConfig="AUTO" />
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..9b9f188023
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
new file mode 100644
index 0000000000..5835392c31
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" adminPass="%(password)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
new file mode 100644
index 0000000000..d9cc795728
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..b9e8a2b365
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl
new file mode 100644
index 0000000000..81b89adf23
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "OS-DCF:diskConfig": "AUTO",
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
new file mode 100644
index 0000000000..fcfada031b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test" OS-DCF:diskConfig="AUTO">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
new file mode 100644
index 0000000000..7c8371f161
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..1309e6dfee
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s" OS-DCF:diskConfig="AUTO">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
new file mode 100644
index 0000000000..a290485e1c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "resize": {
+ "flavorRef": "3",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
new file mode 100644
index 0000000000..aa0b0b67a3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<resize xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO"
+ flavorRef="3" />
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
new file mode 100644
index 0000000000..4ac22820df
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
new file mode 100644
index 0000000000..8088846987
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ OS-DCF:diskConfig="AUTO" />
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
new file mode 100644
index 0000000000..d9cc795728
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
new file mode 100644
index 0000000000..cb8c662442
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s"
+ hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake"
+ tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s"
+ OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
new file mode 100644
index 0000000000..a852da1207
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..2c4cdc07f2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(uuid)s" OS-EXT-AZ:availability_zone="nova">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..b0ddc7c051
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..35e1618678
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
@@ -0,0 +1,20 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1"> <server status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-AZ:availability_zone="nova">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
new file mode 100644
index 0000000000..c460cd0260
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
new file mode 100644
index 0000000000..e8dffa3ba4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="%(image_id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
new file mode 100644
index 0000000000..16d62deb4d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
new file mode 100644
index 0000000000..586c8ed46d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl
new file mode 100644
index 0000000000..7b9f1ba519
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "%(mac_addr)s"
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..49d8a8e2bf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-IPS-MAC="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s" OS-EXT-IPS-MAC:mac_addr="%(mac_addr)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..743abc7c70
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "%(mac_addr)s"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..23dda7c583
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-IPS-MAC="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-IPS-MAC="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s" OS-EXT-IPS-MAC:mac_addr="%(mac_addr)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
new file mode 100644
index 0000000000..acc784fb18
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..aa78378f84
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..26d812ceb8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..d3b5c524d3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
new file mode 100644
index 0000000000..398f0f7027
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..10495ff9d5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..81f247192a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
@@ -0,0 +1,59 @@
+{
+ "servers": [
+ {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..f7da4086a3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-SRV-ATTR:instance_name="%(instance_name)s" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
new file mode 100644
index 0000000000..fc48b73a4e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..6b28dde2da
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..94b3e2200c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(isotime)s",
+ "OS-EXT-STS:task_state": null,
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "OS-EXT-STS:vm_state": "active",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "OS-EXT-STS:power_state": 1,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..89c3b9396e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1" >
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl
new file mode 100644
index 0000000000..684b93448d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "virtual_interfaces": [
+ {
+ "id": "%(id)s",
+ "mac_address": "%(mac_addr)s",
+ "OS-EXT-VIF-NET:net_id": "%(id)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl
new file mode 100644
index 0000000000..adf7d7baa3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<virtual_interfaces
+ xmlns:OS-EXT-VIF-NET="http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <virtual_interface id="%(id)s"
+ mac_address="%(mac_addr)s"
+ OS-EXT-VIF-NET:net_id="%(id)s"/>
+</virtual_interfaces>
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
new file mode 100644
index 0000000000..04083d063c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
new file mode 100644
index 0000000000..5d73195fad
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
new file mode 100644
index 0000000000..a47af7b187
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..13908e2ac6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="1" vcpus="1" ram="512" name="m1.tiny" id="%(flavor_id)s" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
new file mode 100644
index 0000000000..b86db0a461
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
new file mode 100644
index 0000000000..da45536c37
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="1" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
new file mode 100644
index 0000000000..a798262f35
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
new file mode 100644
index 0000000000..5ba4631884
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" OS-FLV-EXT-DATA:ephemeral="0">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
new file mode 100644
index 0000000000..64385ad682
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
@@ -0,0 +1,12 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0,
+ "OS-FLV-EXT-DATA:ephemeral": 30,
+ "swap": 5
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
new file mode 100644
index 0000000000..df74ab383f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ swap="5"
+ rxtx_factor="2.0"
+ OS-FLV-EXT-DATA:ephemeral="30" />
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
new file mode 100644
index 0000000000..3a92dabca8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "OS-FLV-EXT-DATA:ephemeral": 30,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
new file mode 100644
index 0000000000..df7fc07a32
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" OS-FLV-EXT-DATA:ephemeral="30">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl
new file mode 100644
index 0000000000..1a19960c24
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl
@@ -0,0 +1,11 @@
+{
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "%(image_id)s",
+ "flavorRef": "1"
+ },
+ "os:scheduler_hints": {
+ "hypervisor": "xen",
+ "near": "%(image_near)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl
new file mode 100644
index 0000000000..a680e3476e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl
@@ -0,0 +1,12 @@
+<server
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-SCH-HNT="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2"
+ name='new-server-test'
+ imageRef='%(image_id)s'
+ flavorRef='1'
+>
+ <OS-SCH-HNT:scheduler_hints>
+ <hypervisor>xen</hypervisor>
+ <near>%(image_near)s</near>
+ </OS-SCH-HNT:scheduler_hints>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl
new file mode 100644
index 0000000000..5a11c73c3c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl
new file mode 100644
index 0000000000..2b2121d34b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..ee35f36fa0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-SRV-USG="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-SRV-USG:terminated_at="None" OS-SRV-USG:launched_at="%(xmltime)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..bef5a2002a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(isotime)s",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "OS-SRV-USG:terminated_at": null,
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..bce5e2eabe
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-SRV-USG="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-SRV-USG="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-SRV-USG:launched_at="%(xmltime)s" OS-SRV-USG:terminated_at="None" >
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/README.rst b/nova/tests/unit/integrated/api_samples/README.rst
new file mode 100644
index 0000000000..a08cac3a42
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/README.rst
@@ -0,0 +1,29 @@
+API Samples
+===========
+
+This part of the tree contains templates for API samples. The
+documentation in doc/api_samples is completely autogenerated from the
+tests in this directory.
+
+To add a new API sample, add tests for the common passing and failing
+cases in this directory for your extension, and modify test_samples.py
+to include your tests. Both JSON and XML tests should be included.
+
+Then run the following command:
+
+ GENERATE_SAMPLES=True tox -epy27 nova.tests.unit.integrated
+
+This will create the sample files in doc/api_samples.
+
+If new tests are added or the .tpl files are changed due to bug fixes,
+the samples must be regenerated to stay in sync with the templates:
+an additional test reloads the documentation and fails if it has
+drifted out of sync.
+
+Debugging sample generation
+---------------------------
+
+If a .tpl file is changed, its matching .xml and .json samples must be
+removed, otherwise the samples won't be regenerated. If an entirely new
+extension is added, a directory for it must be created before its
+samples will be generated.
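The %(name)s markers used throughout these .tpl files are ordinary Python %-style named placeholders: when samples are generated, the test harness substitutes concrete values from a dict before writing or comparing the files under doc/api_samples. A minimal, illustrative sketch of that substitution step (the template string, substitution values, and names below are invented for the example, not taken from the real test harness):

    import json

    # Trimmed-down stand-in for one of the .tpl files above.
    TPL = """{
        "server": {
            "adminPass": "%(password)s",
            "id": "%(uuid)s"
        }
    }"""

    # Example substitution values; the real tests derive these from the
    # API response and test fixtures.
    subs = {
        'password': 'secret',
        'uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
    }

    rendered = TPL % subs  # plain %-formatting against a dict of named keys
    print(json.loads(rendered)['server']['id'])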
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
new file mode 100644
index 0000000000..668e282e2b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -0,0 +1,716 @@
+{
+ "extensions": [
+ {
+ "alias": "NMN",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Multinic",
+ "namespace": "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-DCF",
+ "description": "%(text)s",
+ "links": [],
+ "name": "DiskConfig",
+ "namespace": "http://docs.openstack.org/compute/ext/disk_config/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-AZ",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedAvailabilityZone",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-IPS",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedIps",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_ips/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-IPS-MAC",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedIpsMac",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-IMG-SIZE",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ImageSize",
+ "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-SRV-ATTR",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedServerAttributes",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-STS",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-volumes",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedVolumes",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-EXT-VIF-NET",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedVIFNet",
+ "namespace": "http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-FLV-DISABLED",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorDisabled",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-FLV-EXT-DATA",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorExtraData",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-SRV-USG",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerUsage",
+ "namespace": "http://docs.openstack.org/compute/ext/server_usage/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-console-auth-tokens",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ConsoleAuthTokens",
+ "namespace": "http://docs.openstack.org/compute/ext/consoles-auth-tokens/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "OS-SCH-HNT",
+ "description": "%(text)s",
+ "links": [],
+ "name": "SchedulerHints",
+ "namespace": "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-admin-actions",
+ "description": "%(text)s",
+ "links": [],
+ "name": "AdminActions",
+ "namespace": "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-aggregates",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Aggregates",
+ "namespace": "http://docs.openstack.org/compute/ext/aggregates/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-agents",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Agents",
+ "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-assisted-volume-snapshots",
+ "description": "%(text)s",
+ "links": [],
+ "name": "AssistedVolumeSnapshots",
+ "namespace": "http://docs.openstack.org/compute/ext/assisted-volume-snapshots/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-attach-interfaces",
+ "description": "Attach interface support.",
+ "links": [],
+ "name": "AttachInterfaces",
+ "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-availability-zone",
+ "description": "%(text)s",
+ "links": [],
+ "name": "AvailabilityZone",
+ "namespace": "http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-baremetal-nodes",
+ "description": "%(text)s",
+ "links": [],
+ "name": "BareMetalNodes",
+ "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-block-device-mapping-v2-boot",
+ "description": "%(text)s",
+ "links": [],
+ "name": "BlockDeviceMappingV2Boot",
+ "namespace": "http://docs.openstack.org/compute/ext/block_device_mapping_v2_boot/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-cells",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Cells",
+ "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-cell-capacities",
+ "description": "%(text)s",
+ "links": [],
+ "name": "CellCapacities",
+ "namespace": "http://docs.openstack.org/compute/ext/cell_capacities/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-certificates",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Certificates",
+ "namespace": "http://docs.openstack.org/compute/ext/certificates/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-cloudpipe",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Cloudpipe",
+ "namespace": "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-cloudpipe-update",
+ "description": "%(text)s",
+ "links": [],
+ "name": "CloudpipeUpdate",
+ "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-config-drive",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ConfigDrive",
+ "namespace": "http://docs.openstack.org/compute/ext/config_drive/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-console-output",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ConsoleOutput",
+ "namespace": "http://docs.openstack.org/compute/ext/os-console-output/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-consoles",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Consoles",
+ "namespace": "http://docs.openstack.org/compute/ext/os-consoles/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-create-server-ext",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Createserverext",
+ "namespace": "http://docs.openstack.org/compute/ext/createserverext/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-deferred-delete",
+ "description": "%(text)s",
+ "links": [],
+ "name": "DeferredDelete",
+ "namespace": "http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-evacuate",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Evacuate",
+ "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-floating-ips",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedFloatingIps",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_floating_ips/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-fixed-ips",
+ "description": "Fixed IPs support.",
+ "links": [],
+ "name": "FixedIPs",
+ "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-flavor-access",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorAccess",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-flavor-extra-specs",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorExtraSpecs",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-flavor-manage",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorManage",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-flavor-rxtx",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorRxtx",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-flavor-swap",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FlavorSwap",
+ "namespace": "http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-floating-ip-dns",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FloatingIpDns",
+ "namespace": "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-floating-ip-pools",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FloatingIpPools",
+ "namespace": "http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-floating-ips",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FloatingIps",
+ "namespace": "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-floating-ips-bulk",
+ "description": "%(text)s",
+ "links": [],
+ "name": "FloatingIpsBulk",
+ "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-hide-server-addresses",
+ "description": "Support hiding server addresses in certain states.",
+ "links": [],
+ "name": "HideServerAddresses",
+ "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-hosts",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Hosts",
+ "namespace": "http://docs.openstack.org/compute/ext/hosts/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-services",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Services",
+ "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-services",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedServices",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_services/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-fping",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Fping",
+ "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-hypervisors",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Hypervisors",
+ "namespace": "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-hypervisors",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedHypervisors",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-hypervisor-status",
+ "description": "%(text)s",
+ "links": [],
+ "name": "HypervisorStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-external-events",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerExternalEvents",
+ "namespace": "http://docs.openstack.org/compute/ext/server-external-events/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-instance_usage_audit_log",
+ "description": "%(text)s",
+ "links": [],
+ "name": "OSInstanceUsageAuditLog",
+ "namespace": "http://docs.openstack.org/ext/services/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-keypairs",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Keypairs",
+ "namespace": "http://docs.openstack.org/compute/ext/keypairs/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-multiple-create",
+ "description": "%(text)s",
+ "links": [],
+ "name": "MultipleCreate",
+ "namespace": "http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-tenant-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-networks-associate",
+ "description": "%(text)s",
+ "links": [],
+ "name": "NetworkAssociationSupport",
+ "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-quota-class-sets",
+ "description": "%(text)s",
+ "links": [],
+ "name": "QuotaClasses",
+ "namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-quotas",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedQuotas",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_quotas/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-quota-sets",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Quotas",
+ "namespace": "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-user-quotas",
+ "description": "%(text)s",
+ "links": [],
+ "name": "UserQuotas",
+ "namespace": "http://docs.openstack.org/compute/ext/user_quotas/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-rescue",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Rescue",
+ "namespace": "http://docs.openstack.org/compute/ext/rescue/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-rescue-with-image",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedRescueWithImage",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-security-group-default-rules",
+ "description": "%(text)s",
+ "links": [],
+ "name": "SecurityGroupDefaultRules",
+ "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-security-groups",
+ "description": "%(text)s",
+ "links": [],
+ "name": "SecurityGroups",
+ "namespace": "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-diagnostics",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerDiagnostics",
+ "namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-list-multi-status",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerListMultiStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-password",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerPassword",
+ "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-start-stop",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerStartStop",
+ "namespace": "http://docs.openstack.org/compute/ext/servers/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-shelve",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Shelve",
+ "namespace": "http://docs.openstack.org/compute/ext/shelve/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-simple-tenant-usage",
+ "description": "%(text)s",
+ "links": [],
+ "name": "SimpleTenantUsage",
+ "namespace": "http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-used-limits",
+ "description": "%(text)s",
+ "links": [],
+ "name": "UsedLimits",
+ "namespace": "http://docs.openstack.org/compute/ext/used_limits/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-used-limits-for-admin",
+ "description": "%(text)s",
+ "links": [],
+ "name": "UsedLimitsForAdmin",
+ "namespace": "http://docs.openstack.org/compute/ext/used_limits_for_admin/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-user-data",
+ "description": "%(text)s",
+ "links": [],
+ "name": "UserData",
+ "namespace": "http://docs.openstack.org/compute/ext/userdata/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-virtual-interfaces",
+ "description": "%(text)s",
+ "links": [],
+ "name": "VirtualInterfaces",
+ "namespace": "http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-volume-attachment-update",
+ "description": "%(text)s",
+ "links": [],
+ "name": "VolumeAttachmentUpdate",
+ "namespace": "http://docs.openstack.org/compute/ext/os-volume-attachment-update/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-volumes",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Volumes",
+ "namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-instance-actions",
+ "description": "%(text)s",
+ "links": [],
+ "name": "InstanceActions",
+ "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-migrations",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Migrations",
+ "namespace": "http://docs.openstack.org/compute/ext/migrations/api/v2.0",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-baremetal-ext-status",
+ "description": "%(text)s",
+ "links": [],
+ "name": "BareMetalExtStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/baremetal_ext_status/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-preserve-ephemeral-rebuild",
+ "description": "%(text)s",
+ "links": [],
+ "name": "PreserveEphemeralOnRebuild",
+ "namespace": "http://docs.openstack.org/compute/ext/preserve_ephemeral_rebuild/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-services-delete",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedServicesDelete",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_services_delete/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-groups",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerGroups",
+ "namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-evacuate-find-host",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedEvacuateFindHost",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2",
+ "updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-server-group-quotas",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerGroupQuotas",
+ "namespace": "http://docs.openstack.org/compute/ext/server-group-quotas/api/v2",
+ "updated": "%(isotime)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
new file mode 100644
index 0000000000..eaa679f35f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -0,0 +1,269 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extensions xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <extension alias="NMN" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/multinic/api/v1.1" name="Multinic">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-DCF" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" name="DiskConfig">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-AZ" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-IPS" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-IPS-MAC" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" name="ExtendedIpsMac">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-IMG-SIZE" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/image_size/api/v1.1" name="ImageSize">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-SRV-ATTR" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-STS" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedStatus">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-volumes" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" name="ExtendedVolumes">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-EXT-VIF-NET" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1" name="ExtendedVIFNet">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-FLV-DISABLED" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" name="FlavorDisabled">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-FLV-EXT-DATA" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" name="FlavorExtraData">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-SRV-USG" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" name="ServerUsage">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="OS-SCH-HNT" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2" name="SchedulerHints">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-admin-actions" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/admin-actions/api/v1.1" name="AdminActions">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-aggregates" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/aggregates/api/v1.1" name="Aggregates">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-attach-interfaces" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/interfaces/api/v1.1" name="AttachInterfaces">
+ <description>Attach interface support.</description>
+ </extension>
+ <extension alias="os-availability-zone" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-agents" name="Agents" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-baremetal-nodes" name="BareMetalNodes" namespace="http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-block-device-mapping-v2-boot" name="BlockDeviceMappingV2Boot" namespace="http://docs.openstack.org/compute/ext/block_device_mapping_v2_boot/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-cells" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-cell-capacities" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/cell_capacities/api/v1.1" name="CellCapacities">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-certificates" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-cloudpipe" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1" name="Cloudpipe">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-cloudpipe-update" updated="%(isotime)s" name="CloudpipeUpdate" namespace="http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-config-drive" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/config_drive/api/v1.1" name="ConfigDrive">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-console-output" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-console-output/api/v2" name="ConsoleOutput">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-consoles" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-consoles/api/v2" name="Consoles">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-create-server-ext" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/createserverext/api/v1.1" name="Createserverext">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-deferred-delete" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-evacuate" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/evacuate/api/v2" name="Evacuate">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-floating-ips" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_floating_ips/api/v2" name="ExtendedFloatingIps">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-fixed-ips" name="FixedIPs" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" updated="%(isotime)s">
+ <description>Fixed IPs support.</description>
+ </extension>
+ <extension alias="os-flavor-access" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_access/api/v2" name="FlavorAccess">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-flavor-extra-specs" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1" name="FlavorExtraSpecs">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-flavor-manage" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1" name="FlavorManage">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-flavor-rxtx" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1" name="FlavorRxtx">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-flavor-swap" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1" name="FlavorSwap">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-floating-ip-dns" updated="%(isotime)s" namespace="http://docs.openstack.org/ext/floating_ip_dns/api/v1.1" name="FloatingIpDns">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-floating-ip-pools" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1" name="FloatingIpPools">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-floating-ips" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/floating_ips/api/v1.1" name="FloatingIps">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-floating-ips-bulk" name="FloatingIpsBulk" namespace="http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-hide-server-addresses" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1" name="HideServerAddresses">
+ <description>Support hiding server addresses in certain states.</description>
+ </extension>
+ <extension alias="os-hosts" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/hosts/api/v1.1" name="Hosts">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-services" name="Services" namespace="http://docs.openstack.org/compute/ext/services/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-services" name="ExtendedServices" namespace="http://docs.openstack.org/compute/ext/extended_services/api/v2" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-fping" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/fping/api/v1.1" name="Fping">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-hypervisors" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/hypervisors/api/v1.1" name="Hypervisors">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-hypervisors" name="ExtendedHypervisors" namespace="http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1" updated="%(isotime)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-hypervisor-status" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1" name="HypervisorStatus">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-instance_usage_audit_log" updated="%(isotime)s" namespace="http://docs.openstack.org/ext/services/api/v1.1" name="OSInstanceUsageAuditLog">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-keypairs" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/keypairs/api/v1.1" name="Keypairs">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-multiple-create" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-networks" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-tenant-networks" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-networks-associate" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-quota-class-sets" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-networks" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_networks/api/v2" name="ExtendedNetworks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-quotas" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_quotas/api/v1.1" name="ExtendedQuotas">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-quota-sets" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1" name="Quotas">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-user-quotas" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/user_quotas/api/v1.1" name="UserQuotas">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-rescue" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/rescue/api/v1.1" name="Rescue">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-rescue-with-image" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2" name="ExtendedRescueWithImage">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-security-group-default-rules" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1" name="SecurityGroupDefaultRules">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-security-groups" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" name="SecurityGroups">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-diagnostics" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1" name="ServerDiagnostics">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-list-multi-status" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2" name="ServerListMultiStatus">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-password" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-start-stop" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-shelve" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/shelve/api/v1.1" name="Shelve">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-simple-tenant-usage" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1" name="SimpleTenantUsage">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-used-limits" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" name="UsedLimits">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-used-limits-for-admin" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/used_limits_for_admin/api/v1.1" name="UsedLimitsForAdmin">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-user-data" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/userdata/api/v1.1" name="UserData">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-virtual-interfaces" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1" name="VirtualInterfaces">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-volume-attachment-update" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/os-volume-attachment-update/api/v2" name="VolumeAttachmentUpdate">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-volumes" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-instance-actions" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/instance-actions/api/v1.1" name="InstanceActions">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-external-events" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/server-external-events/api/v2" name="ServerExternalEvents">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-migrations" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/migrations/api/v2.0" name="Migrations">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-assisted-volume-snapshots" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/assisted-volume-snapshots/api/v2" name="AssistedVolumeSnapshots">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-baremetal-ext-status" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/baremetal_ext_status/api/v2" name="BareMetalExtStatus"> <description>%(text)s</description>
+ </extension>
+ <extension alias="os-preserve-ephemeral-rebuild" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/preserve_ephemeral_rebuild/api/v2" name="PreserveEphemeralOnRebuild">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-services-delete" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_services_delete/api/v2" name="ExtendedServicesDelete">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-console-auth-tokens" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/consoles-auth-tokens/api/v2" name="ConsoleAuthTokens">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-groups" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/servergroups/api/v2" name="ServerGroups">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-extended-evacuate-find-host" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2" name="ExtendedEvacuateFindHost">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-server-group-quotas" updated="%(isotime)s" namespace="http://docs.openstack.org/compute/ext/server-group-quotas/api/v2" name="ServerGroupQuotas">
+ <description>%(text)s</description>
+ </extension>
+</extensions>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl
new file mode 100644
index 0000000000..b68bc3c979
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl
new file mode 100644
index 0000000000..53f870ec4d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:OS-FLV-DISABLED="http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1" xmlns:os-flavor-access="http://docs.openstack.org/compute/ext/flavor_access/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1" name="m1.tiny" ram="512" vcpus="1" swap="" rxtx_factor="1.0" disk="1" id="1" os-flavor-access:is_public="True" OS-FLV-EXT-DATA:ephemeral="0" OS-FLV-DISABLED:disabled="False">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl
new file mode 100644
index 0000000000..ab86d2a52a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl
@@ -0,0 +1,74 @@
+{
+ "flavors": [
+ {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny"
+ },
+ {
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small"
+ },
+ {
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium"
+ },
+ {
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large"
+ },
+ {
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl
new file mode 100644
index 0000000000..435f96be56
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor name="m1.tiny" id="1">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.small" id="2">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.medium" id="3">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.large" id="4">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.xlarge" id="5">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl
new file mode 100644
index 0000000000..da615718fe
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl
@@ -0,0 +1,5 @@
+{
+ "changePassword" : {
+ "adminPass" : "%(password)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl
new file mode 100644
index 0000000000..6c343024e2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<changePassword
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ adminPass="%(password)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl
new file mode 100644
index 0000000000..432f6126e9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "confirmResize" : null
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl
new file mode 100644
index 0000000000..18f07bd67b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<confirmResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl
new file mode 100644
index 0000000000..0b9e39ffb3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl
@@ -0,0 +1,9 @@
+{
+ "createImage" : {
+ "name" : "%(name)s",
+ "metadata": {
+ "%(meta_var)s": "%(meta_val)s"
+ }
+ }
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl
new file mode 100644
index 0000000000..aa1eccf8a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<createImage
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="%(name)s">
+ <metadata>
+ <meta key="%(meta_var)s">%(meta_val)s</meta>
+ </metadata>
+</createImage>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl
new file mode 100644
index 0000000000..18eda9b9ab
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl
@@ -0,0 +1,5 @@
+{
+ "reboot" : {
+ "type" : "%(type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl
new file mode 100644
index 0000000000..d4cfe198c7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reboot
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ type="%(type)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..8705a8749c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "%(ip)s",
+ "accessIPv6": "%(ip6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta var": "meta val"
+ },
+ "name": "%(name)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl
new file mode 100644
index 0000000000..6fa0505367
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ id="%(uuid)s"
+ tenantId="openstack" userId="fake"
+ name="%(name)s"
+ hostId="%(hostid)s" progress="0"
+ status="ACTIVE" adminPass="%(password)s"
+ created="%(isotime)s"
+ updated="%(isotime)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s"
+ OS-DCF:diskConfig="AUTO">
+ <image id="%(uuid)s">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/images/%(uuid)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/flavors/1"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link
+ rel="self"
+ href="%(host)s/v2/openstack/servers/%(uuid)s"/>
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/servers/%(uuid)s"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..273906a349
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl
@@ -0,0 +1,18 @@
+{
+ "rebuild" : {
+ "imageRef" : "%(host)s/v2/32278/images/%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "accessIPv4" : "%(ip)s",
+ "accessIPv6" : "%(ip6)s",
+ "metadata" : {
+ "meta var" : "meta val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl
new file mode 100644
index 0000000000..bd42f88b22
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="%(name)s"
+ imageRef="%(host)s/v1.1/32278/images/%(uuid)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s"
+ adminPass="%(pass)s">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</rebuild>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl
new file mode 100644
index 0000000000..468a88da24
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl
@@ -0,0 +1,5 @@
+{
+ "resize" : {
+ "flavorRef" : "%(id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl
new file mode 100644
index 0000000000..cbe49ea59a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<resize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ flavorRef="%(id)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl
new file mode 100644
index 0000000000..2ddf6e5ab0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "revertResize" : null
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl
new file mode 100644
index 0000000000..5c13bbdc0c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<revertResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl
new file mode 100644
index 0000000000..c83ab91068
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl
@@ -0,0 +1,74 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "%(mac_addr)s"
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "os-extended-volumes:volumes_attached": []
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..1bd75a99b9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-IPS-MAC="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:OS-SRV-USG="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO" OS-SRV-USG:launched_at="%(xmltime)s" OS-SRV-USG:terminated_at="None">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"
+ OS-EXT-IPS-MAC:mac_addr="%(mac_addr)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ <security_groups>
+ <security_group name="default"/>
+ </security_groups>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl
new file mode 100644
index 0000000000..c931eb3fdc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..d7dc316552
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s" OS-DCF:diskConfig="AUTO">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+ <security_groups>
+ <security_group name="default"/>
+ </security_groups>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..d2aea31149
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -0,0 +1,76 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "%(mac_addr)s"
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "os-extended-volumes:volumes_attached": []
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
new file mode 100644
index 0000000000..bf8dc083cd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
@@ -0,0 +1,25 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-IPS-MAC="http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:OS-SRV-USG="http://docs.openstack.org/compute/ext/server_usage/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO" OS-SRV-USG:launched_at="%(xmltime)s" OS-SRV-USG:terminated_at="None">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"
+ OS-EXT-IPS-MAC:mac_addr="%(mac_addr)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ <security_groups>
+ <security_group name="default"/>
+ </security_groups>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8b97dc28d7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl
new file mode 100644
index 0000000000..03bee03a6e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl
new file mode 100644
index 0000000000..723be2898f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl
new file mode 100644
index 0000000000..5925c588d0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" name="m1.tiny" ram="512" vcpus="1" disk="1" id="1">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl
new file mode 100644
index 0000000000..fb9a8ff1f6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl
@@ -0,0 +1,74 @@
+{
+ "flavors": [
+ {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny"
+ },
+ {
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small"
+ },
+ {
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium"
+ },
+ {
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large"
+ },
+ {
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl
new file mode 100644
index 0000000000..435f96be56
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor name="m1.tiny" id="1">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.small" id="2">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.medium" id="3">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.large" id="4">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor name="m1.xlarge" id="5">
+ <atom:link href="http://openstack.example.com/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl
new file mode 100644
index 0000000000..3d260b7e90
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl
@@ -0,0 +1,33 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl
new file mode 100644
index 0000000000..2a69728071
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="%(image_id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl
new file mode 100644
index 0000000000..6d022eb97d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "kernel_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl
new file mode 100644
index 0000000000..1de6b40781
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="kernel_id">nokernel</meta>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl
new file mode 100644
index 0000000000..7d8ab69a51
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta" : {
+ "auto_disk_config" : "False"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl
new file mode 100644
index 0000000000..319e075eef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<meta
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key="auto_disk_config">False</meta>
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl
new file mode 100644
index 0000000000..3db563ec14
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "auto_disk_config": "False"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl
new file mode 100644
index 0000000000..c989c38a2d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="auto_disk_config">False</meta>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl
new file mode 100644
index 0000000000..588f688d5a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl
new file mode 100644
index 0000000000..8409016bf4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+</metadata>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl
new file mode 100644
index 0000000000..b51e5f00fc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "kernel_id": "False",
+ "Label": "UpdatedImage"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl
new file mode 100644
index 0000000000..6170aab5ae
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="kernel_id">False</meta>
+ <meta key="Label">UpdatedImage</meta>
+</metadata>
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl
new file mode 100644
index 0000000000..9479bb3395
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "metadata": {
+ "Label": "UpdatedImage",
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "False",
+ "ramdisk_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl
new file mode 100644
index 0000000000..1d96bd9af5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="kernel_id">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="Label">UpdatedImage</meta>
+ <meta key="architecture">x86_64</meta>
+ <meta key="auto_disk_config">True</meta>
+</metadata>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl
new file mode 100644
index 0000000000..eec6152d77
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "auto_disk_config": "True",
+ "Label": "Changed"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl
new file mode 100644
index 0000000000..e5f5d8991c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="auto_disk_config">True</meta>
+ <meta key="Label">Changed</meta>
+</metadata>
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl
new file mode 100644
index 0000000000..c8c5ee9c4a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "Label": "Changed",
+ "auto_disk_config": "True"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl
new file mode 100644
index 0000000000..7011871cc9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="auto_disk_config">True</meta>
+ <meta key="Label">Changed</meta>
+</metadata>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl
new file mode 100644
index 0000000000..657f0b1974
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl
new file mode 100644
index 0000000000..e353b98956
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl
@@ -0,0 +1,212 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl
new file mode 100644
index 0000000000..2194789790
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl
new file mode 100644
index 0000000000..5cd76d6c6b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl
@@ -0,0 +1,212 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl
new file mode 100644
index 0000000000..aa1d973b9a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl
new file mode 100644
index 0000000000..83e6accada
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl
@@ -0,0 +1,137 @@
+{
+ "images": [
+ {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage7"
+ },
+ {
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage6"
+ },
+ {
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl
new file mode 100644
index 0000000000..71b9bfc8bf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl
@@ -0,0 +1,38 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image name="fakeimage7" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage6" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl
new file mode 100644
index 0000000000..6ed1616770
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl
@@ -0,0 +1,137 @@
+{
+ "images": [
+ {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage7"
+ },
+ {
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage6"
+ },
+ {
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl
new file mode 100644
index 0000000000..701e958926
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl
@@ -0,0 +1,38 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image name="fakeimage7" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage6" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image name="fakeimage123456" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl
new file mode 100644
index 0000000000..f5b30047da
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl
@@ -0,0 +1,85 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "maxSecurityGroups": 10,
+ "maxSecurityGroupRules": 20
+ },
+ "rate": [
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "PUT"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "DELETE"
+ }
+ ],
+ "regex": ".*",
+ "uri": "*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ }
+ ],
+ "regex": "^/servers",
+ "uri": "*/servers"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "GET"
+ }
+ ],
+ "regex": ".*changes-since.*",
+ "uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 12,
+ "unit": "MINUTE",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl
new file mode 100644
index 0000000000..ecc7b3b5da
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<limits xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <rates>
+ <rate regex=".*" uri="*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="PUT" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="DELETE" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/servers" uri="*/servers">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ </rate>
+ <rate regex=".*changes-since.*" uri="*changes-since*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="12" value="12"/>
+ </rate>
+ </rates>
+ <absolute>
+ <limit name="maxServerMeta" value="128"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="maxPersonality" value="5"/>
+ <limit name="maxImageMeta" value="128"/>
+ <limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
+ <limit name="maxTotalKeypairs" value="100"/>
+ <limit name="maxSecurityGroups" value="10"/>
+ <limit name="maxTotalCores" value="20"/>
+ <limit name="maxTotalFloatingIps" value="10"/>
+ <limit name="maxTotalRAMSize" value="51200"/>
+ </absolute>
+</limits>
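Note: the %(isotime)s, %(host)s and similar markers in these .tpl samples are Python %-style substitution placeholders. A minimal, hypothetical sketch of how such a fragment could be rendered (illustrative names and values only, not the actual nova api_samples test harness):

    # Hypothetical rendering of a .tpl fragment; the dict keys and values
    # below are made up for illustration.
    import datetime

    template = '{"next-available": "%(isotime)s", "uri": "%(uri)s"}'
    subs = {
        "isotime": datetime.datetime(2012, 12, 26, 14, 45, 25).isoformat(),
        "uri": "*/servers",
    }
    print(template % subs)
    # {"next-available": "2012-12-26T14:45:25", "uri": "*/servers"}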
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl
new file mode 100644
index 0000000000..60f5e1d9fe
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl
@@ -0,0 +1,7 @@
+{
+ "createBackup": {
+ "name": "Backup 1",
+ "backup_type": "daily",
+ "rotation": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl
new file mode 100644
index 0000000000..f2f9024bd7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <createBackup>
+ <name>Backup 1</name>
+ <backup_type>daily</backup_type>
+ <rotation>1</rotation>
+ </createBackup>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
new file mode 100644
index 0000000000..62e16737b0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
@@ -0,0 +1,3 @@
+{
+ "injectNetworkInfo": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl
new file mode 100644
index 0000000000..e5b71ffcdb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <injectNetworkInfo />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl
new file mode 100644
index 0000000000..4800d4aa11
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl
@@ -0,0 +1,7 @@
+{
+ "os-migrateLive": {
+ "host": "%(hostname)s",
+ "block_migration": false,
+ "disk_over_commit": false
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl
new file mode 100644
index 0000000000..88ead85f20
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+ <os-migrateLive>
+ <host>%(hostname)s</host>
+ <block_migration>false</block_migration>
+ <disk_over_commit>false</disk_over_commit>
+ </os-migrateLive>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl
new file mode 100644
index 0000000000..a1863f2f39
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "lock": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl
new file mode 100644
index 0000000000..f86b130547
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <lock />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl
new file mode 100644
index 0000000000..a9bf8c483e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl
@@ -0,0 +1,3 @@
+{
+ "migrate": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl
new file mode 100644
index 0000000000..431284448d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <migrate />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl
new file mode 100644
index 0000000000..2e7c1fad30
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl
@@ -0,0 +1,3 @@
+{
+ "pause": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl
new file mode 100644
index 0000000000..a37359338c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <pause/>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
new file mode 100644
index 0000000000..7c79cb68a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
@@ -0,0 +1,3 @@
+{
+ "resetNetwork": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl
new file mode 100644
index 0000000000..6034983911
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <resetNetwork />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
new file mode 100644
index 0000000000..013aed4824
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-resetState": {
+ "state": "active"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl
new file mode 100644
index 0000000000..435c1c7d76
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <os-resetState>
+ <state>active</state>
+ </os-resetState>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
new file mode 100644
index 0000000000..72d9478678
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-resetState": {
+ "state": "active"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl
new file mode 100644
index 0000000000..435c1c7d76
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <os-resetState>
+ <state>active</state>
+ </os-resetState>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl
new file mode 100644
index 0000000000..ff00d97a14
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl
@@ -0,0 +1,3 @@
+{
+ "resume": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl
new file mode 100644
index 0000000000..4d6aaa750c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <resume />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl
new file mode 100644
index 0000000000..8c2206a063
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl
@@ -0,0 +1,3 @@
+{
+ "suspend": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl
new file mode 100644
index 0000000000..02aeac572a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <suspend />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl
new file mode 100644
index 0000000000..9e905ca2b9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unlock": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl
new file mode 100644
index 0000000000..8331e2258a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <unlock />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl
new file mode 100644
index 0000000000..ce5024f0c9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unpause": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl
new file mode 100644
index 0000000000..b674f09269
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <unpause />
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl
new file mode 100644
index 0000000000..6dbd2f17cb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "agent": {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl
new file mode 100644
index 0000000000..5c777749a2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <hypervisor>%(hypervisor)s</hypervisor>
+ <os>%(os)s</os>
+ <architecture>%(architecture)s</architecture>
+ <version>%(version)s</version>
+ <md5hash>%(md5hash)s</md5hash>
+ <url>%(url)s</url>
+</agent>
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl
new file mode 100644
index 0000000000..79e41ceafc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "agent": {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s",
+ "agent_id": 1
+ }
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
new file mode 100644
index 0000000000..ecf97b91e9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <url>%(url)s</url>
+ <hypervisor>%(hypervisor)s</hypervisor>
+ <md5hash>%(md5hash)s</md5hash>
+ <version>%(version)s</version>
+ <architecture>%(architecture)s</architecture>
+ <os>%(os)s</os>
+ <agent_id>%(agent_id)d</agent_id>
+</agent>
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
new file mode 100644
index 0000000000..d447350e0d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "para": {
+ "url": "%(url)s",
+ "md5hash": "%(md5hash)s",
+ "version": "%(version)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
new file mode 100644
index 0000000000..19751dc807
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<para>
+ <version>%(version)s</version>
+ <url>%(url)s</url>
+ <md5hash>%(md5hash)s</md5hash>
+</para>
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
new file mode 100644
index 0000000000..110e52cd33
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "agent": {
+ "agent_id": "%(agent_id)d",
+ "url": "%(url)s",
+ "md5hash": "%(md5hash)s",
+ "version": "%(version)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
new file mode 100644
index 0000000000..2c9e50572c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<agent>
+ <agent_id>%(agent_id)d</agent_id>
+ <version>%(version)s</version>
+ <url>%(url)s</url>
+ <md5hash>%(md5hash)s</md5hash>
+</agent>
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl
new file mode 100644
index 0000000000..30562289fc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "agents": [
+ {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s",
+ "agent_id": 1
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
new file mode 100644
index 0000000000..fbbbdad288
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<agents>
+ <agent hypervisor="%(hypervisor)s" os="%(os)s" architecture="%(architecture)s" version="%(version)s" md5hash="%(md5hash)s" url="%(url)s" agent_id="%(agent_id)d"/>
+</agents>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
new file mode 100644
index 0000000000..2a84101a16
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "add_host":
+ {
+ "host": "%(host_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl
new file mode 100644
index 0000000000..4454134efb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<add_host host="%(host_name)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
new file mode 100644
index 0000000000..63a2921cac
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "set_metadata":
+ {
+ "metadata":
+ {
+ "key": "value"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl
new file mode 100644
index 0000000000..72b1e742aa
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<set_metadata>
+ <metadata>
+ <key>value</key>
+ </metadata>
+</set_metadata>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl
new file mode 100644
index 0000000000..fc806061e8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "name",
+ "availability_zone": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl
new file mode 100644
index 0000000000..4931476ae5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<aggregate name="name" availability_zone="nova" />
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl
new file mode 100644
index 0000000000..935643d03c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "id": %(aggregate_id)s,
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl
new file mode 100644
index 0000000000..2a1bee5868
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <deleted_at>None</deleted_at>
+ <id>%(aggregate_id)s</id>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
new file mode 100644
index 0000000000..66ecf30cd6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "remove_host":
+ {
+ "host": "%(host_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl
new file mode 100644
index 0000000000..bc2896835f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<remove_host host="%(host_name)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
new file mode 100644
index 0000000000..55e4b09346
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "newname",
+ "availability_zone": "nova2"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl
new file mode 100644
index 0000000000..04ce4fba58
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<aggregate name="newname" availability_zone="nova2" />
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
new file mode 100644
index 0000000000..2e229a473a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova2",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova2"
+ },
+ "name": "newname",
+ "updated_at": "%(strtime)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..1ff22bc0e0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>newname</name>
+ <availability_zone>nova2</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>%(xmltime)s</updated_at>
+ <hosts/>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <availability_zone>nova2</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
new file mode 100644
index 0000000000..e5775c206d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [
+ "%(compute_host)s"
+ ],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
new file mode 100644
index 0000000000..7412dee66d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <hosts>
+ <host>%(compute_host)s</host>
+ </hosts>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
new file mode 100644
index 0000000000..b91781fae2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
new file mode 100644
index 0000000000..7f44a231cb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <hosts/>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
new file mode 100644
index 0000000000..642653d1e6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "aggregates": [
+ {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
new file mode 100644
index 0000000000..79af4a8d89
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregates>
+ <aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <hosts/>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+ </aggregate>
+</aggregates>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
new file mode 100644
index 0000000000..b15c40fa5d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova",
+ "key": "value"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
new file mode 100644
index 0000000000..01245a4dbb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <hosts/>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <key>value</key>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
new file mode 100644
index 0000000000..b91781fae2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
new file mode 100644
index 0000000000..7f44a231cb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<aggregate>
+ <name>name</name>
+ <availability_zone>nova</availability_zone>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <hosts/>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+ <metadata>
+ <availability_zone>nova</availability_zone>
+ </metadata>
+</aggregate>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl
new file mode 100644
index 0000000000..defa10203e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "snapshot": {
+ "volume_id": "%(volume_id)s",
+ "create_info": {
+ "snapshot_id": "%(snapshot_id)s",
+ "type": "%(type)s",
+ "new_file": "%(new_file)s"}
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl
new file mode 100644
index 0000000000..772bb43d92
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <snapshot>
+ <volume_id>%(volume_id)s</volume_id>
+ <create_info>
+ <snapshot_id>%(snapshot_id)s</snapshot_id>
+ <type>%(type)s</type>
+ <new_file>%(new_file)s</new_file>
+ </create_info>
+ </snapshot>
diff --git a/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl
new file mode 100644
index 0000000000..8d4e7f5709
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "snapshot": {
+ "id": 100,
+ "volumeId": "%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl
new file mode 100644
index 0000000000..5da7d148b1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<snapshot volumeId="521752a6-acf6-4b2d-bc7a-119f9148cd8c" id="100"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
new file mode 100644
index 0000000000..11dcf64373
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "interfaceAttachment": {
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
new file mode 100644
index 0000000000..75e9b97c8c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_id>%(port_id)s</port_id>
+</interfaceAttachment>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
new file mode 100644
index 0000000000..d882cdc612
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [{
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "port_state": "%(port_state)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
new file mode 100644
index 0000000000..b391e59733
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
@@ -0,0 +1,12 @@
+<interfaceAttachment>
+ <net_id>%(net_id)s</net_id>
+ <port_id>%(port_id)s</port_id>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_state>%(port_state)s</port_state>
+ <mac_addr>%(mac_addr)s</mac_addr>
+</interfaceAttachment>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
new file mode 100644
index 0000000000..47dcf2dc64
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "interfaceAttachments": [
+ {
+ "port_state": "%(port_state)s",
+ "fixed_ips": [
+ {
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }
+ ],
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "mac_addr": "%(mac_addr)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
new file mode 100644
index 0000000000..f3262e948e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachments>
+ <interfaceAttachment>
+ <port_state>%(port_state)s</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>%(port_id)s</port_id>
+ <net_id>%(net_id)s</net_id>
+ <mac_addr>%(mac_addr)s</mac_addr>
+ </interfaceAttachment>
+</interfaceAttachments>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
new file mode 100644
index 0000000000..3333bb4999
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
@@ -0,0 +1,14 @@
+{
+ "interfaceAttachment": {
+ "port_state": "%(port_state)s",
+ "fixed_ips": [
+ {
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }
+ ],
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "mac_addr": "%(mac_addr)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
new file mode 100644
index 0000000000..a3393448d4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_state>%(port_state)s</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>%(port_id)s</port_id>
+ <net_id>%(net_id)s</net_id>
+ <mac_addr>%(mac_addr)s</mac_addr>
+</interfaceAttachment>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl
new file mode 100644
index 0000000000..07529dfc93
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl
@@ -0,0 +1,48 @@
+{
+ "availabilityZoneInfo": [
+ {
+ "zoneName": "zone-1",
+ "zoneState": {
+ "available": true
+ },
+ "hosts": {
+ "fake_host-1": {
+ "nova-compute": {
+ "active": true,
+ "available": true,
+ "updated_at": "2012-12-26T14:45:25.000000"
+ }
+ }
+ }
+ },
+ {
+ "zoneName": "internal",
+ "zoneState": {
+ "available": true
+ },
+ "hosts": {
+ "fake_host-1": {
+ "nova-sched": {
+ "active": true,
+ "available": true,
+ "updated_at": "2012-12-26T14:45:25.000000"
+ }
+ },
+ "fake_host-2": {
+ "nova-network": {
+ "active": true,
+ "available": false,
+ "updated_at": "2012-12-26T14:45:24.000000"
+ }
+ }
+ }
+ },
+ {
+ "zoneName": "zone-2",
+ "zoneState": {
+ "available": false
+ },
+ "hosts": null
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl
new file mode 100644
index 0000000000..856a649577
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl
@@ -0,0 +1,44 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<availabilityZones
+ xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1">
+ <availabilityZone name="zone-1">
+ <zoneState available="True" />
+ <hosts>
+ <host name="fake_host-1">
+ <services>
+ <service name="nova-compute">
+ <serviceState available="True" active="True"
+ updated_at="2012-12-26 14:45:25" />
+ </service>
+ </services>
+ </host>
+ </hosts>
+ <metadata />
+ </availabilityZone>
+ <availabilityZone name="internal">
+ <zoneState available="True" />
+ <hosts>
+ <host name="fake_host-1">
+ <services>
+ <service name="nova-sched">
+ <serviceState available="True" active="True"
+ updated_at="2012-12-26 14:45:25" />
+ </service>
+ </services>
+ </host>
+ <host name="fake_host-2">
+ <services>
+ <service name="nova-network">
+ <serviceState available="False" active="True"
+ updated_at="2012-12-26 14:45:24" />
+ </service>
+ </services>
+ </host>
+ </hosts>
+ <metadata />
+ </availabilityZone>
+ <availabilityZone name="zone-2">
+ <zoneState available="False" />
+ <metadata />
+ </availabilityZone>
+</availabilityZones>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl
new file mode 100644
index 0000000000..c512d182fb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "availabilityZoneInfo": [
+ {
+ "zoneName": "zone-1",
+ "zoneState": {
+ "available": true
+ },
+ "hosts": null
+ },
+ {
+ "zoneName": "zone-2",
+ "zoneState": {
+ "available": false
+ },
+ "hosts": null
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl
new file mode 100644
index 0000000000..1eff177dee
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<availabilityZones
+ xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1">
+ <availabilityZone name="zone-1">
+ <zoneState available="True" />
+ <metadata />
+ </availabilityZone>
+ <availabilityZone name="zone-2">
+ <zoneState available="False" />
+ <metadata />
+ </availabilityZone>
+</availabilityZones>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl
new file mode 100644
index 0000000000..f013ba0796
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "availability_zone": "%(availability_zone)s",
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl
new file mode 100644
index 0000000000..9c55b49cbe
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ flavorRef="%(host)s/openstack/flavors/1"
+ name="new-server-test"
+ availability_zone="%(availability_zone)s">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl
new file mode 100644
index 0000000000..2ad5c102b0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl
new file mode 100644
index 0000000000..48912edfc0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl
@@ -0,0 +1,33 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "block_device_mapping_v2": [
+ {
+ "device_name": "/dev/sdb1",
+ "source_type": "blank",
+ "destination_type": "local",
+ "delete_on_termination": "True",
+ "guest_format": "swap",
+ "boot_index": "-1"
+ },
+ {
+ "device_name": "/dev/sda1",
+ "source_type": "volume",
+ "destination_type": "volume",
+ "uuid": "fake-volume-id-1",
+ "boot_index": "0"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl
new file mode 100644
index 0000000000..962b507658
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+ <block_device_mapping_v2>
+ <mapping device_name="/dev/sdb1" source_type="blank" destination_type="local" delete_on_termination="True" guest_format="swap" boot_index="-1"></mapping>
+ <mapping device_name="/dev/sda1" source_type="volume" destination_type="volume" uuid="fake-volume-id-1" boot_index="0"></mapping>
+ </block_device_mapping_v2>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl
new file mode 100644
index 0000000000..b926f8d1df
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "cell": {
+ "capacities": {
+ "ram_free": {
+ "units_by_mb": {
+ "8192": 0, "512": 13, "4096": 1, "2048": 3, "16384": 0
+ },
+ "total_mb": 7680
+ },
+ "disk_free": {
+ "units_by_mb": {
+ "81920": 11, "20480": 46, "40960": 23, "163840": 5, "0": 0
+ },
+ "total_mb": 1052672
+ }
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl
new file mode 100644
index 0000000000..63672b00bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <capacities>
+ <ram_free total_mb="7680">
+ <unit_by_mb unit="0" mb="8192"/>
+ <unit_by_mb unit="13" mb="512"/>
+ <unit_by_mb unit="1" mb="4096"/>
+ <unit_by_mb unit="3" mb="2048"/>
+ <unit_by_mb unit="0" mb="16384"/>
+ </ram_free>
+ <disk_free total_mb="1052672">
+ <unit_by_mb unit="11" mb="81920"/>
+ <unit_by_mb unit="46" mb="20480"/>
+ <unit_by_mb unit="23" mb="40960"/>
+ <unit_by_mb unit="5" mb="163840"/>
+ <unit_by_mb unit="0" mb="0"/>
+ </disk_free>
+ </capacities>
+</cell>
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl
new file mode 100644
index 0000000000..2993b1df88
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
new file mode 100644
index 0000000000..d31a674a2f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
new file mode 100644
index 0000000000..b16e12cd69
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
@@ -0,0 +1,4 @@
+{
+ "cells": []
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
new file mode 100644
index 0000000000..32fef4f048
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl
new file mode 100644
index 0000000000..3d7a6c207c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "username": "username1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell2",
+ "username": "username2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell3",
+ "username": "username3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ },
+ {
+ "name": "cell4",
+ "username": "username4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent"
+ },
+ {
+ "name": "cell5",
+ "username": "username5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
new file mode 100644
index 0000000000..58312201f6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0">
+ <cell name="cell1" username="username1" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell2" username="username2" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/>
+ <cell name="cell4" username="username4" rpc_port="None" rpc_host="None" type="parent"/>
+ <cell name="cell5" username="username5" rpc_port="None" rpc_host="None" type="child"/>
+</cells>
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl
new file mode 100644
index 0000000000..35c063c820
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "certificate": {
+ "data": "%(text)s",
+ "private_key": "%(text)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl
new file mode 100644
index 0000000000..75f2d5d7f4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<certificate private_key="%(text)s" data="%(text)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl
new file mode 100644
index 0000000000..4938e92fba
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "certificate": {
+ "data": "%(text)s",
+ "private_key": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl
new file mode 100644
index 0000000000..bbc54284a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<certificate private_key="None" data="%(text)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
new file mode 100644
index 0000000000..0ab9141aea
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "%(vpn_ip)s",
+ "vpn_port": "%(vpn_port)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
new file mode 100644
index 0000000000..34d2be9dfc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<configure_project>
+ <vpn_ip>%(vpn_ip)s</vpn_ip>
+ <vpn_port>%(vpn_port)s</vpn_port>
+</configure_project>
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
new file mode 100644
index 0000000000..c8fc75995a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "cloudpipe": {
+ "project_id": "%(project_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl
new file mode 100644
index 0000000000..b0a60b896c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl
@@ -0,0 +1,3 @@
+<cloudpipe
+ project_id="%(project_id)s"
+/>
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
new file mode 100644
index 0000000000..6aa2ff60e2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "instance_id": "%(id)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl
new file mode 100644
index 0000000000..63064cc51a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl
@@ -0,0 +1,3 @@
+<cloudpipe>
+ <instance_id>%(uuid)s</instance_id>
+</cloudpipe>
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
new file mode 100644
index 0000000000..698008802e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "cloudpipes": [
+ {
+ "created_at": "%(isotime)s",
+ "instance_id": "%(uuid)s",
+ "internal_ip": "%(ip)s",
+ "project_id": "%(project_id)s",
+ "public_ip": "%(ip)s",
+ "public_port": 22,
+ "state": "down"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl
new file mode 100644
index 0000000000..63eb40be4f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<cloudpipes>
+ <cloudpipe>
+ <public_ip>%(ip)s</public_ip>
+ <created_at>%(isotime)s</created_at>
+ <public_port>22</public_port>
+ <state>down</state>
+ <instance_id>%(uuid)s</instance_id>
+ <internal_ip>%(ip)s</internal_ip>
+ <project_id>%(project_id)s</project_id>
+ </cloudpipe>
+</cloudpipes>
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
new file mode 100644
index 0000000000..723714bf73
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "%(cdrive)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
new file mode 100644
index 0000000000..6cd025045e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" config_drive="%(cdrive)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
new file mode 100644
index 0000000000..f3ae979ecb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "%(cdrive)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
new file mode 100644
index 0000000000..1882ba835a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" config_drive="%(cdrive)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
new file mode 100644
index 0000000000..f5be11801e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "console": {
+ "instance_uuid": "%(id)s",
+ "host": "%(host)s",
+ "port": %(port)s,
+ "internal_access_path": "%(internal_access_path)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl
new file mode 100644
index 0000000000..de81f08fe8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <instance_uuid>%(id)s</instance_uuid>
+ <host>%(host)s</host>
+ <port>%(port)s</port>
+ <internal_access_path>%(internal_access_path)s</internal_access_path>
+</console>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
new file mode 100644
index 0000000000..00956b90e4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getRDPConsole": {
+ "type": "rdp-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl
new file mode 100644
index 0000000000..b761d78b67
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getRDPConsole>
+ <type>rdp-html5</type>
+</os-getRDPConsole>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl
new file mode 100644
index 0000000000..caeb2a5502
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getConsoleOutput": {
+ "length": 50
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl
new file mode 100644
index 0000000000..af477004df
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getConsoleOutput length="50" />
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl
new file mode 100644
index 0000000000..fae6b128e9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl
new file mode 100644
index 0000000000..e93d81df35
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<output>FAKE CONSOLE OUTPUT
+ANOTHER
+LAST LINE</output>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl
new file mode 100644
index 0000000000..00956b90e4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getRDPConsole": {
+ "type": "rdp-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl
new file mode 100644
index 0000000000..b761d78b67
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getRDPConsole>
+ <type>rdp-html5</type>
+</os-getRDPConsole>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl
new file mode 100644
index 0000000000..b8272ca5c0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "rdp-html5",
+ "url":"%(url)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl
new file mode 100644
index 0000000000..24fc3cd848
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>rdp-html5</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl
new file mode 100644
index 0000000000..1d754d6608
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getSerialConsole": {
+ "type": "serial"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl
new file mode 100644
index 0000000000..71eb3ae555
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getSerialConsole>
+ <type>serial</type>
+</os-getSerialConsole>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl
new file mode 100644
index 0000000000..67fbfec5b4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "serial",
+ "url":"%(url)s"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl
new file mode 100644
index 0000000000..1bef48769d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>serial</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl
new file mode 100644
index 0000000000..d04f7c7ae9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getSPICEConsole": {
+ "type": "spice-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl
new file mode 100644
index 0000000000..c8cd2df9f4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getSPICEConsole>
+ <type>spice-html5</type>
+</os-getSPICEConsole>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl
new file mode 100644
index 0000000000..20e260e9ef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "spice-html5",
+ "url":"%(url)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl
new file mode 100644
index 0000000000..77e35ae5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>spice-html5</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
new file mode 100644
index 0000000000..1926119ced
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
new file mode 100644
index 0000000000..c1f73180e8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getVNCConsole>
+ <type>novnc</type>
+</os-getVNCConsole>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
new file mode 100644
index 0000000000..3cf7255759
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url":"%(url)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
new file mode 100644
index 0000000000..d4904aa9a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>novnc</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
new file mode 100644
index 0000000000..d3562d390d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
new file mode 100644
index 0000000000..31928207e8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<forceDelete />
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
new file mode 100644
index 0000000000..d38291fe08
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "restore": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
new file mode 100644
index 0000000000..8a95b4fccf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<restore />
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..179cddce73
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "evacuate": {
+ "host": "%(host)s",
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
new file mode 100644
index 0000000000..b0471f9162
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<evacuate xmlns="http://docs.openstack.org/compute/api/v2"
+ host="%(host)s"
+ adminPass="%(adminPass)s"
+ onSharedStorage="%(onSharedStorage)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
new file mode 100644
index 0000000000..2a779af6d1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
@@ -0,0 +1 @@
+<adminPass>%(password)s</adminPass>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..5e2c2e6ef0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
new file mode 100644
index 0000000000..a86c9e5c8a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<evacuate xmlns="http://docs.openstack.org/compute/api/v2"
+ adminPass="%(adminPass)s"
+ onSharedStorage="%(onSharedStorage)s"/>
+
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
new file mode 100644
index 0000000000..b3b95fdde4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<adminPass>%(password)s</adminPass>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl
new file mode 100644
index 0000000000..24129f4958
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "pool": "%(pool)s"
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl
new file mode 100644
index 0000000000..a80147389d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<pool>%(pool)s</pool>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl
new file mode 100644
index 0000000000..10ee8d9bd4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "floating_ip": {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl
new file mode 100644
index 0000000000..e0f68ef503
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl
new file mode 100644
index 0000000000..10ee8d9bd4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "floating_ip": {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl
new file mode 100644
index 0000000000..e0f68ef503
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl
new file mode 100644
index 0000000000..12f118da50
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "floating_ips": []
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl
new file mode 100644
index 0000000000..da6f0d4ce9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl
new file mode 100644
index 0000000000..06f57451c9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "floating_ips": [
+ {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ },
+ {
+ "fixed_ip": null,
+ "id": 2,
+ "instance_id": null,
+ "ip": "10.10.10.2",
+ "pool": "nova"
+ }
+ ]
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl
new file mode 100644
index 0000000000..bbd0b117ef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips>
+ <floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/>
+ <floating_ip instance_id="None" ip="10.10.10.2" fixed_ip="None" id="2" pool="nova"/>
+</floating_ips>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
new file mode 100644
index 0000000000..a1e5f2080b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
@@ -0,0 +1,25 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
new file mode 100644
index 0000000000..ed2a8b0829
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1000" disk_available_least="0" host_ip="%(ip)s" local_gb="1028" free_ram_mb="7680" id="%(hypervisor_id)s">
+ <service host="%(host_name)s" id="2"/>
+</hypervisor>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
new file mode 100644
index 0000000000..18515bd6c4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
@@ -0,0 +1,12 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24",
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true,
+ "allowed_start": "10.20.105.10",
+ "allowed_end": "10.20.105.200"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
new file mode 100644
index 0000000000..3cc79bd837
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
@@ -0,0 +1,10 @@
+<network>
+ <label>new net 111</label>
+ <cidr>10.20.105.0/24</cidr>
+ <mtu>9000</mtu>
+ <dhcp_server>10.20.105.2</dhcp_server>
+ <enable_dhcp>False</enable_dhcp>
+ <share_address>True</share_address>
+ <allowed_start>10.20.105.10</allowed_start>
+ <allowed_end>10.20.105.200</allowed_end>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
new file mode 100644
index 0000000000..5cf155b13f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "network": {
+ "bridge": null,
+ "vpn_public_port": null,
+ "dhcp_start": "%(ip)s",
+ "bridge_interface": null,
+ "updated_at": null,
+ "id": "%(id)s",
+ "cidr_v6": null,
+ "deleted_at": null,
+ "gateway": "%(ip)s",
+ "rxtx_base": null,
+ "label": "new net 111",
+ "priority": null,
+ "project_id": null,
+ "vpn_private_address": null,
+ "deleted": null,
+ "vlan": null,
+ "broadcast": "%(ip)s",
+ "netmask": "%(ip)s",
+ "injected": null,
+ "cidr": "10.20.105.0/24",
+ "vpn_public_address": null,
+ "multi_host": null,
+ "dns2": null,
+ "created_at": null,
+ "host": null,
+ "gateway_v6": null,
+ "netmask_v6": null,
+ "dns1": null,
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
new file mode 100644
index 0000000000..3a757c5f2f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
@@ -0,0 +1,34 @@
+<network>
+ <bridge>None</bridge>
+ <vpn_public_port>None</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>None</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>new net 111</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>None</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>None</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>None</injected>
+ <cidr>10.20.105.0/24</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>None</multi_host>
+ <dns2>None</dns2>
+ <created_at>None</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ <mtu>9000</mtu>
+ <dhcp_server>10.20.105.2</dhcp_server>
+ <enable_dhcp>False</enable_dhcp>
+ <share_address>True</share_address>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
new file mode 100644
index 0000000000..ac75fe7fb1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
@@ -0,0 +1,37 @@
+{
+ "network":
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
new file mode 100644
index 0000000000..3139ca88a8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
@@ -0,0 +1,35 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(xmltime)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ <mtu>None</mtu>
+ <dhcp_server>%(ip)s</dhcp_server>
+ <enable_dhcp>True</enable_dhcp>
+ <share_address>False</share_address>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
new file mode 100644
index 0000000000..ccdd586a0f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
@@ -0,0 +1,72 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": null,
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
new file mode 100644
index 0000000000..0b7f456402
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<networks>
+ <network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(xmltime)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ <mtu>None</mtu>
+ <dhcp_server>%(ip)s</dhcp_server>
+ <enable_dhcp>True</enable_dhcp>
+ <share_address>False</share_address>
+ </network>
+ <network>
+ <bridge>br101</bridge>
+ <vpn_public_port>1001</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_1</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>101</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.10/29</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ <mtu>None</mtu>
+ <dhcp_server>%(ip)s</dhcp_server>
+ <enable_dhcp>True</enable_dhcp>
+ <share_address>False</share_address>
+ </network>
+</networks>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..a58a179123
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "quota_set": {
+ "force": "True",
+ "instances": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl
new file mode 100644
index 0000000000..499b890f03
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <force>True</force>
+ <instances>45</instances>
+</quota_set>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..c882a8cb12
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 45,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..b8c4c0d831
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>45</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl
new file mode 100755
index 0000000000..d9a355319e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl
@@ -0,0 +1,53 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "status": "%(status)s",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl
new file mode 100755
index 0000000000..fee8326e0c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="%(status)s" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl
new file mode 100755
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl
new file mode 100755
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl
new file mode 100755
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl
new file mode 100755
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl
new file mode 100755
index 0000000000..0f2e751fcf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rescue": {
+ "adminPass": "%(password)s",
+ "rescue_image_ref" : "%(rescue_image_ref)s"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl
new file mode 100755
index 0000000000..75666d81a2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rescue xmlns="http://docs.openstack.org/compute/api/v1.1"
+    adminPass="%(password)s" rescue_image_ref="%(rescue_image_ref)s"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl
new file mode 100755
index 0000000000..1922e4db1b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl
new file mode 100755
index 0000000000..b3b95fdde4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<adminPass>%(password)s</adminPass>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl
new file mode 100644
index 0000000000..d91fe367f9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl
@@ -0,0 +1,40 @@
+{
+ "services": [
+ {
+ "id": 1,
+ "binary": "nova-scheduler",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "id": 2,
+ "binary": "nova-compute",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ },
+ {
+ "id": 3,
+ "binary": "nova-scheduler",
+ "host": "host2",
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "id": 4,
+ "binary": "nova-compute",
+ "host": "host2",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl
new file mode 100644
index 0000000000..8e87af9173
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<services>
+ <service status="disabled" binary="nova-scheduler" zone="internal" state="up" updated_at="%(xmltime)s" host="host1" id="1"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="up" updated_at="%(xmltime)s" host="host1" id="2"/>
+ <service status="enabled" binary="nova-scheduler" zone="internal" state="down" updated_at="%(xmltime)s" host="host2" id="3"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="down" updated_at="%(xmltime)s" host="host2" id="4"/>
+</services>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl
new file mode 100644
index 0000000000..70b1deabe2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1"},
+ {"id": "volume_id2"}
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..75a8fb10cc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <os-extended-volumes:volume_attached id="volume_id1"/>
+ <os-extended-volumes:volume_attached id="volume_id2"/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..1962d6a6af
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
@@ -0,0 +1,59 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1"},
+ {"id": "volume_id2"}
+ ]
+ }]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl
new file mode 100644
index 0000000000..a2ecf018eb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:os-extended-volumes="http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" >
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <os-extended-volumes:volume_attached id="volume_id1"/>
+ <os-extended-volumes:volume_attached id="volume_id2"/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
new file mode 100644
index 0000000000..85ae4890ad
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reserve": "%(reserve)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
new file mode 100644
index 0000000000..3896b24eb6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reserve>%(reserve)s</reserve>
diff --git a/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
new file mode 100644
index 0000000000..a3d11475bf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "cidr": "%(cidr)s",
+ "hostname": "%(hostname)s",
+ "host": "%(host)s",
+ "address": "%(address)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
new file mode 100644
index 0000000000..3e9598f347
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<fixed_ip>
+ <cidr>%(cidr)s</cidr>
+ <hostname>%(hostname)s</hostname>
+ <host>%(host)s</host>
+ <address>%(address)s</address>
+</fixed_ip>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl
new file mode 100644
index 0000000000..94f5439e04
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addTenantAccess": {
+ "tenant": "%(tenant_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl
new file mode 100644
index 0000000000..312819dadb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<addTenantAccess>
+ <tenant>%(tenant_id)s</tenant>
+</addTenantAccess>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl
new file mode 100644
index 0000000000..d797155795
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "flavor_access": [
+ {
+ "flavor_id": "%(flavor_id)s",
+ "tenant_id": "%(tenant_id)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl
new file mode 100644
index 0000000000..2223052aae
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor_access>
+ <access tenant_id="%(tenant_id)s" flavor_id="%(flavor_id)s"/>
+</flavor_access>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl
new file mode 100644
index 0000000000..02ac4e695d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "os-flavor-access:is_public": false
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl
new file mode 100644
index 0000000000..5714fb9d0d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:os-flavor-access="http://docs.openstack.org/compute/ext/flavor_access/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ os-flavor-access:is_public="False"
+/>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl
new file mode 100644
index 0000000000..4110795ec9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "os-flavor-access:is_public": false,
+ "ram": 1024,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl
new file mode 100644
index 0000000000..c3a8994078
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:os-flavor-access="http://docs.openstack.org/compute/ext/flavor_access/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" os-flavor-access:is_public="False">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl
new file mode 100644
index 0000000000..b5f1eea542
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "os-flavor-access:is_public": true,
+ "ram": 2048,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "os-flavor-access:is_public": true,
+ "ram": 4096,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "os-flavor-access:is_public": true,
+ "ram": 8192,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "os-flavor-access:is_public": true,
+ "ram": 16384,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl
new file mode 100644
index 0000000000..8ee66226a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:os-flavor-access="http://docs.openstack.org/compute/ext/flavor_access/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl
new file mode 100644
index 0000000000..a6b6dbdcda
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "flavor_access": [
+ {
+ "flavor_id": "%(flavor_id)s",
+ "tenant_id": "fake_tenant"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl
new file mode 100644
index 0000000000..1e55ad2f95
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor_access>
+ <access tenant_id="fake_tenant" flavor_id="10"/>
+</flavor_access>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl
new file mode 100644
index 0000000000..20711e02b4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeTenantAccess": {
+ "tenant": "%(tenant_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl
new file mode 100644
index 0000000000..490de3e315
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<removeTenantAccess>
+ <tenant>%(tenant_id)s</tenant>
+</removeTenantAccess>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl
new file mode 100644
index 0000000000..5cab03334d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "flavor_access": []
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl
new file mode 100644
index 0000000000..80d1ecc48c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor_access/>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl
new file mode 100644
index 0000000000..2e991a4cef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl
new file mode 100644
index 0000000000..ae18daba22
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:os-flavor-access="http://docs.openstack.org/compute/ext/flavor_access/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="1" vcpus="1" ram="512" name="m1.tiny" id="%(flavor_id)s" os-flavor-access:is_public="True">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
new file mode 100644
index 0000000000..c94595cad1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<extra_specs>
+ <key1>%(value1)s</key1>
+ <key2>%(value2)s</key2>
+</extra_specs>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
new file mode 100644
index 0000000000..1008b5bb0e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>%(value2)s</key2>
+ <key1>%(value1)s</key1>
+</extra_specs>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
new file mode 100644
index 0000000000..e3de59a342
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">%(value1)s</extra_spec>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
new file mode 100644
index 0000000000..1008b5bb0e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>%(value2)s</key2>
+ <key1>%(value1)s</key1>
+</extra_specs>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
new file mode 100644
index 0000000000..6421e59592
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <key1>%(value1)s</key1>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
new file mode 100644
index 0000000000..e3de59a342
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">%(value1)s</extra_spec>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
new file mode 100644
index 0000000000..5383e5d15e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
new file mode 100644
index 0000000000..764cebe8e4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor>
+ <name>%(flavor_name)s</name>
+ <ram>1024</ram>
+ <vcpus>2</vcpus>
+ <disk>10</disk>
+ <id>%(flavor_id)s</id>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
new file mode 100644
index 0000000000..ae0ce80ba2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
new file mode 100644
index 0000000000..156ef215e6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
new file mode 100644
index 0000000000..241cf7c800
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
new file mode 100644
index 0000000000..d461b443ed
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
new file mode 100644
index 0000000000..035c860c9a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
new file mode 100644
index 0000000000..ee937b974b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" rxtx_factor="1.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
new file mode 100644
index 0000000000..70d0a57de8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
new file mode 100644
index 0000000000..a87b47670e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ rxtx_factor="2.0" />
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
new file mode 100644
index 0000000000..abf652fae3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
new file mode 100644
index 0000000000..d24623c555
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" rxtx_factor="2.0">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
new file mode 100644
index 0000000000..9b7e57c8a9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
new file mode 100644
index 0000000000..9375b14b5e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="1" vcpus="1" ram="512" name="%(flavor_name)s" id="%(flavor_id)s" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
new file mode 100644
index 0000000000..1367e75de5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
@@ -0,0 +1,94 @@
+{
+ "flavors": [
+ {
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 20,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "disk": 40,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "disk": 80,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "disk": 160,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
new file mode 100644
index 0000000000..7c9c589bef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
@@ -0,0 +1,23 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavors xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <flavor disk="1" vcpus="1" ram="512" name="m1.tiny" id="1" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/1" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <flavor disk="20" vcpus="1" ram="2048" name="m1.small" id="2" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/2" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/2" rel="bookmark"/>
+ </flavor>
+ <flavor disk="40" vcpus="2" ram="4096" name="m1.medium" id="3" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/3" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/3" rel="bookmark"/>
+ </flavor>
+ <flavor disk="80" vcpus="4" ram="8192" name="m1.large" id="4" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/4" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/4" rel="bookmark"/>
+ </flavor>
+ <flavor disk="160" vcpus="8" ram="16384" name="m1.xlarge" id="5" swap="">
+ <atom:link href="%(host)s/v2/openstack/flavors/5" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/5" rel="bookmark"/>
+ </flavor>
+</flavors>
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
new file mode 100644
index 0000000000..ca86aeb4e4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "swap": 5
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
new file mode 100644
index 0000000000..5f54df5cd2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<flavor xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:OS-FLV-EXT-DATA="http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
+ name="%(flavor_name)s"
+ ram="1024"
+ vcpus="2"
+ disk="10"
+ id="%(flavor_id)s"
+ swap="5" />
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
new file mode 100644
index 0000000000..e61a08dc17
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "swap": 5,
+ "vcpus": 2
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
new file mode 100644
index 0000000000..e8c69ecee7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<flavor xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" disk="10" vcpus="2" ram="1024" name="%(flavor_name)s" id="%(flavor_id)s" swap="5">
+ <atom:link href="%(host)s/v2/openstack/flavors/%(flavor_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/flavors/%(flavor_id)s" rel="bookmark"/>
+</flavor>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
new file mode 100644
index 0000000000..7dc33ddb10
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "dns_entry" :
+ {
+ "ip": "%(ip)s",
+ "dns_type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
new file mode 100644
index 0000000000..bd62d34186
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry>
+ <ip>%(ip)s</ip>
+ <dns_type>%(dns_type)s</dns_type>
+</dns_entry>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
new file mode 100644
index 0000000000..3ec0743ba7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
new file mode 100644
index 0000000000..38a659b78e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="%(dns_type)s" id="None" name="%(name)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
new file mode 100644
index 0000000000..db73be14a8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry" :
+ {
+ "domain": "%(domain)s",
+ "scope": "%(scope)s",
+ "project": "%(project)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
new file mode 100644
index 0000000000..40866a5373
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry>
+ <domain>%(domain)s</domain>
+ <scope>%(scope)s</scope>
+ <project>%(project)s</project>
+</domain_entry>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
new file mode 100644
index 0000000000..a14d395d23
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
new file mode 100644
index 0000000000..1759c403af
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
new file mode 100644
index 0000000000..8edd0603f7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
new file mode 100644
index 0000000000..a889ef6e2c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
new file mode 100644
index 0000000000..831cda7b55
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
new file mode 100644
index 0000000000..bf7788f94d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entries>
+ <dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
+</dns_entries>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
new file mode 100644
index 0000000000..a6055cfecc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
new file mode 100644
index 0000000000..e57c290cb8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entries>
+ <domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
+</domain_entries>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
new file mode 100644
index 0000000000..607109d70d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "floating_ip_pools": [
+ {
+ "name": "%(pool1)s"
+ },
+ {
+ "name": "%(pool2)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
new file mode 100644
index 0000000000..ae4b3a4bb3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<floating_ip_pools>
+ <floating_ip_pool name="%(pool1)s"/>
+ <floating_ip_pool name="%(pool2)s"/>
+</floating_ip_pools>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
new file mode 100644
index 0000000000..426f07e989
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "floating_ips_bulk_create" :
+ {
+ "ip_range": "%(ip_range)s",
+ "pool": "%(pool)s",
+ "interface": "%(interface)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
new file mode 100644
index 0000000000..ebe0b9aa9a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+<ip_range>%(ip_range)s</ip_range>
+<pool>%(pool)s</pool>
+<interface>%(interface)s</interface>
+</floating_ips_bulk_create>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
new file mode 100644
index 0000000000..ef1cbfb17f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "floating_ips_bulk_create": {
+ "interface": "eth0",
+ "ip_range": "192.168.1.0/24",
+ "pool": "nova"
+ }
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
new file mode 100644
index 0000000000..db80bbfc10
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_create>
+ <interface>eth0</interface>
+ <ip_range>192.168.1.0/24</ip_range>
+ <pool>nova</pool>
+</floating_ips_bulk_create> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
new file mode 100644
index 0000000000..d630d669cd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "ip_range": "%(ip_range)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
new file mode 100644
index 0000000000..27a6b0e95a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<ip_range>%(ip_range)s</ip_range>
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
new file mode 100644
index 0000000000..166984b24a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "floating_ips_bulk_delete": "192.168.1.0/24"
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
new file mode 100644
index 0000000000..3d77af334a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips_bulk_delete>192.168.1.0/24</floating_ips_bulk_delete> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
new file mode 100644
index 0000000000..0eaaf75ae0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
new file mode 100644
index 0000000000..4c3c8cd9ca
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
new file mode 100644
index 0000000000..de1e622bb1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
@@ -0,0 +1,25 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.1",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.2",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
new file mode 100644
index 0000000000..6ef85bd874
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip_info>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.1</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.2</address>
+ </item>
+ <item>
+ <interface>eth0</interface>
+ <instance_uuid>None</instance_uuid>
+ <project_id>None</project_id>
+ <pool>nova</pool>
+ <address>10.10.10.3</address>
+ </item>
+</floating_ip_info> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl
new file mode 100644
index 0000000000..24129f4958
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "pool": "%(pool)s"
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl
new file mode 100644
index 0000000000..a80147389d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<pool>%(pool)s</pool> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl
new file mode 100644
index 0000000000..10ee8d9bd4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "floating_ip": {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl
new file mode 100644
index 0000000000..e0f68ef503
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl
new file mode 100644
index 0000000000..10ee8d9bd4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "floating_ip": {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl
new file mode 100644
index 0000000000..e0f68ef503
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl
new file mode 100644
index 0000000000..12f118da50
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "floating_ips": []
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl
new file mode 100644
index 0000000000..da6f0d4ce9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips/> \ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl
new file mode 100644
index 0000000000..06f57451c9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "floating_ips": [
+ {
+ "fixed_ip": null,
+ "id": 1,
+ "instance_id": null,
+ "ip": "10.10.10.1",
+ "pool": "nova"
+ },
+ {
+ "fixed_ip": null,
+ "id": 2,
+ "instance_id": null,
+ "ip": "10.10.10.2",
+ "pool": "nova"
+ }
+ ]
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl
new file mode 100644
index 0000000000..bbd0b117ef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<floating_ips>
+ <floating_ip instance_id="None" ip="10.10.10.1" fixed_ip="None" id="1" pool="nova"/>
+ <floating_ip instance_id="None" ip="10.10.10.2" fixed_ip="None" id="2" pool="nova"/>
+</floating_ips>
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
new file mode 100644
index 0000000000..f3b222c399
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "server": {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
new file mode 100644
index 0000000000..758519b60e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server>
+ <project_id>openstack</project_id>
+ <id>%(uuid)s</id>
+ <alive>False</alive>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl
new file mode 100644
index 0000000000..b33e80668b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "servers": [
+ {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
new file mode 100644
index 0000000000..290ad6ca68
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers>
+ <server>
+ <project_id>openstack</project_id>
+ <id>%(uuid)s</id>
+ <alive>False</alive>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
new file mode 100644
index 0000000000..4ac6374529
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..cee28db35c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..81afe431c0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
new file mode 100644
index 0000000000..da0472dbcf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8b97dc28d7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
new file mode 100644
index 0000000000..03bee03a6e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
new file mode 100644
index 0000000000..4ed89a182d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "reboot"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
new file mode 100644
index 0000000000..4f9c8e4378
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="reboot"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl
new file mode 100644
index 0000000000..efb234b436
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "host": [
+ {
+ "resource": {
+ "cpu": 1,
+ "disk_gb": 1028,
+ "host": "%(host_name)s",
+ "memory_mb": 8192,
+ "project": "(total)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 512,
+ "project": "(used_now)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 0,
+ "project": "(used_max)"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
new file mode 100644
index 0000000000..e162734ba3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host>
+ <resource>
+ <project>(total)</project>
+ <memory_mb>8192</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>1</cpu>
+ <disk_gb>1028</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_now)</project>
+ <memory_mb>512</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+ <resource>
+ <project>(used_max)</project>
+ <memory_mb>0</memory_mb>
+ <host>%(host_name)s</host>
+ <cpu>0</cpu>
+ <disk_gb>0</disk_gb>
+ </resource>
+</host>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
new file mode 100644
index 0000000000..c0df4481a2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "shutdown"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
new file mode 100644
index 0000000000..d78bd32a5d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="shutdown"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl
new file mode 100644
index 0000000000..90f5ac7bcb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "startup"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
new file mode 100644
index 0000000000..581f7cf07f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host host="%(host_name)s" power_action="startup"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
new file mode 100644
index 0000000000..6accac1644
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "status": "enable",
+ "maintenance_mode": "disable"
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
new file mode 100644
index 0000000000..d127a7a26b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<updates>
+ <status>enable</status>
+ <maintenance_mode>disable</maintenance_mode>
+</updates>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
new file mode 100644
index 0000000000..92f73892b3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "host": "%(host_name)s",
+ "maintenance_mode": "off_maintenance",
+ "status": "enabled"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
new file mode 100644
index 0000000000..e9c99512b8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<host status="enabled" maintenance_mode="off_maintenance" host="%(host_name)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
new file mode 100644
index 0000000000..9fb47106db
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "hosts": [
+ {
+ "host_name": "%(host_name)s",
+ "service": "compute",
+ "zone": "nova"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cert",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "network",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "scheduler",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "conductor",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cells",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "consoleauth",
+ "zone": "internal"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
new file mode 100644
index 0000000000..a031c9b661
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hosts>
+ <host host_name="%(host_name)s" service="compute" zone="nova"/>
+ <host host_name="%(host_name)s" service="cert" zone="internal"/>
+ <host host_name="%(host_name)s" service="network" zone="internal"/>
+ <host host_name="%(host_name)s" service="scheduler" zone="internal"/>
+ <host host_name="%(host_name)s" service="conductor" zone="internal"/>
+ <host host_name="%(host_name)s" service="cells" zone="internal"/>
+ <host host_name="%(host_name)s" service="consoleauth" zone="internal"/>
+</hosts>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
new file mode 100644
index 0000000000..14464ccf4d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "state": "up",
+ "status": "enabled",
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
new file mode 100644
index 0000000000..6cfd860af5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1000" disk_available_least="0" local_gb="1028" free_ram_mb="7680" id="%(hypervisor_id)s" state="up" status="enabled">
+ <service host="%(host_name)s" id="2" disabled_reason="None"/>
+</hypervisor>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
new file mode 100644
index 0000000000..9ccda9c7e6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "hypervisors": [
+ {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": null,
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
new file mode 100644
index 0000000000..1169ce1e01
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1000" disk_available_least="None" host_ip="%(ip)s" local_gb="1028" free_ram_mb="7680" id="1">
+ <service host="%(host_name)s" id="2"/>
+ </hypervisor>
+</hypervisors>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
new file mode 100644
index 0000000000..8d94021274
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
new file mode 100644
index 0000000000..6b7d9d7ca1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
new file mode 100644
index 0000000000..8d94021274
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
new file mode 100644
index 0000000000..6b7d9d7ca1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
new file mode 100644
index 0000000000..8d94021274
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
new file mode 100644
index 0000000000..7782732ba6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini">
+ <servers/>
+ </hypervisor>
+</hypervisors>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
new file mode 100644
index 0000000000..356316d61f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
new file mode 100644
index 0000000000..090f720398
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1000" disk_available_least="0" local_gb="1028" free_ram_mb="7680" id="%(hypervisor_id)s">
+ <service host="%(host_name)s" id="2"/>
+</hypervisor>
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
new file mode 100644
index 0000000000..2cfb51e703
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "hypervisor_statistics": {
+ "count": 1,
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
new file mode 100644
index 0000000000..5d10411e3a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor_statistics count="1" vcpus_used="0" local_gb_used="0" memory_mb="8192" current_workload="0" vcpus="1" running_vms="0" free_disk_gb="1028" disk_available_least="0" local_gb="1028" free_ram_mb="7680" memory_mb_used="512"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
new file mode 100644
index 0000000000..8a36c65f23
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "hypervisor": {
+ "hypervisor_hostname": "fake-mini",
+ "id": %(hypervisor_id)s,
+ "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
new file mode 100644
index 0000000000..04219f5b5d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor uptime=" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" id="%(hypervisor_id)s" hypervisor_hostname="fake-mini"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
new file mode 100644
index 0000000000..7cd5325239
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "%(action)s",
+ "instance_uuid": "%(instance_uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": "",
+ "events": [
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ },
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
new file mode 100644
index 0000000000..61c0ac8c76
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceAction action="%(action)s" instance_uuid="%(instance_uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(xmltime)s" message="">
+ <events event="%(event)s" start_time="%(xmltime)s" finish_time="%(xmltime)s" result="%(result)s" traceback=""/>
+ <events event="%(event)s" start_time="%(xmltime)s" finish_time="%(xmltime)s" result="%(result)s" traceback=""/>
+</instanceAction>
diff --git a/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
new file mode 100644
index 0000000000..0fdc33916a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ },
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
new file mode 100644
index 0000000000..87a8726cce
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceActions>
+ <instanceAction action="%(action)s" instance_uuid="%(uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(xmltime)s" message=""/>
+ <instanceAction action="%(action)s" instance_uuid="%(uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(xmltime)s" message=""/>
+</instanceActions>
diff --git a/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
new file mode 100644
index 0000000000..81b0d6c341
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_logs": {
+ "hosts_not_run": [
+ "%(hostid)s"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "%(xmltime)s",
+ "period_ending": "%(xmltime)s",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
new file mode 100644
index 0000000000..8b670b0c91
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_logs>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>%(hostid)s</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>%(xmltime)s</period_ending>
+ <period_beginning>%(xmltime)s</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_logs>
diff --git a/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
new file mode 100644
index 0000000000..71549c156b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_log": {
+ "hosts_not_run": [
+ "%(hostid)s"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "%(xmltime)s",
+ "period_ending": "%(xmltime)s",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..9ceb1c26c8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_log>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>%(hostid)s</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>%(xmltime)s</period_ending>
+ <period_beginning>%(xmltime)s</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_log>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl
new file mode 100644
index 0000000000..4fde60f14b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "keypair": {
+ "public_key": "%(public_key)s",
+ "name": "%(keypair_name)s",
+ "fingerprint": "%(fingerprint)s",
+ "user_id": "fake",
+ "deleted": false,
+ "created_at": "%(strtime)s",
+ "updated_at": null,
+ "deleted_at": null,
+ "id": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl
new file mode 100644
index 0000000000..3442f1ed62
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<keypair>
+ <public_key>%(public_key)s
+</public_key>
+ <name>%(keypair_name)s</name>
+ <fingerprint>%(fingerprint)s</fingerprint>
+ <user_id>fake</user_id>
+ <deleted>False</deleted>
+ <created_at>%(xmltime)s</created_at>
+ <updated_at>None</updated_at>
+ <deleted_at>None</deleted_at>
+ <id>1</id>
+</keypair>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
new file mode 100644
index 0000000000..2301fa05b2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl
new file mode 100644
index 0000000000..0516de3035
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<keypair>
+ <name>%(keypair_name)s</name>
+ <public_key>%(public_key)s</public_key>
+</keypair>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
new file mode 100644
index 0000000000..ca7192d5dc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl
new file mode 100644
index 0000000000..ed2543c107
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<keypair>
+ <public_key>%(public_key)s</public_key>
+ <user_id>fake</user_id>
+ <name>%(keypair_name)s</name>
+ <fingerprint>%(fingerprint)s</fingerprint>
+</keypair>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl
new file mode 100644
index 0000000000..29ba63c00b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl
new file mode 100644
index 0000000000..493bfa3161
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<keypairs>
+ <keypair>
+ <public_key>%(public_key)s
+</public_key>
+ <name>%(keypair_name)s</name>
+ <fingerprint>%(fingerprint)s</fingerprint>
+ </keypair>
+</keypairs>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl
new file mode 100644
index 0000000000..68e2f03487
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl
new file mode 100644
index 0000000000..e14935d314
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl
@@ -0,0 +1,3 @@
+<keypair>
+ <name>%(keypair_name)s</name>
+</keypair>
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl
new file mode 100644
index 0000000000..aace6f5ccc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "private_key": "%(private_key)s",
+ "public_key": "%(public_key)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl
new file mode 100644
index 0000000000..4f041e0c9e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<keypair>
+ <public_key>%(public_key)s
+</public_key>
+ <private_key>%(private_key)s</private_key>
+ <user_id>fake</user_id>
+ <name>%(keypair_name)s</name>
+ <fingerprint>%(fingerprint)s</fingerprint>
+</keypair>
diff --git a/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl
new file mode 100644
index 0000000000..91775be775
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl
@@ -0,0 +1,32 @@
+{
+ "migrations": [
+ {
+ "created_at": "2012-10-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1234,
+ "instance_uuid": "instance_id_123",
+ "new_instance_type_id": 2,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "Done",
+ "updated_at": "2012-10-29T13:42:02.000000"
+ },
+ {
+ "created_at": "2013-10-22T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 5678,
+ "instance_uuid": "instance_id_456",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "Done",
+ "updated_at": "2013-10-22T13:42:02.000000"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl
new file mode 100644
index 0000000000..f5c59c7f1b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<migrations>
+ <migration dest_host="1.2.3.4" status="Done" old_instance_type_id="1" updated_at="2012-10-29 13:42:02" dest_compute="compute2" created_at="2012-10-29 13:42:02" source_node="node1" instance_uuid="instance_id_123" dest_node="node2" id="1234" new_instance_type_id="2" source_compute="compute1"/>
+ <migration dest_host="5.6.7.8" status="Done" old_instance_type_id="5" updated_at="2013-10-22 13:42:02" dest_compute="compute20" created_at="2013-10-22 13:42:02" source_node="node10" instance_uuid="instance_id_456" dest_node="node20" id="5678" new_instance_type_id="6" source_compute="compute10"/>
+</migrations>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
new file mode 100644
index 0000000000..a7690d7b69
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
@@ -0,0 +1,18 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "min_count": "%(min_count)s",
+ "max_count": "%(max_count)s",
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl
new file mode 100644
index 0000000000..1548974da3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ flavorRef="%(host)s/openstack/flavors/1"
+ name="new-server-test"
+ min_count="%(min_count)s"
+ max_count="%(max_count)s">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl
new file mode 100644
index 0000000000..2ad5c102b0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
new file mode 100644
index 0000000000..9984c05884
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
@@ -0,0 +1,19 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "return_reservation_id": "True",
+ "min_count": "%(min_count)s",
+ "max_count": "%(max_count)s",
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl
new file mode 100644
index 0000000000..7ac9b23d67
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ flavorRef="%(host)s/openstack/flavors/1"
+ name="new-server-test"
+ min_count="%(min_count)s"
+ max_count="%(max_count)s"
+ return_reservation_id="True">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
new file mode 100644
index 0000000000..22d2880feb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reservation_id": "%(reservation_id)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl
new file mode 100644
index 0000000000..e5ba2cc56e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" reservation_id="%(reservation_id)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
new file mode 100644
index 0000000000..762e881751
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "associate_host": "%(host)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
new file mode 100644
index 0000000000..7c96c96a12
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<associate_host>%(host)s</associate_host>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
new file mode 100644
index 0000000000..46f69b3e81
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
new file mode 100644
index 0000000000..910504a44a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_host/>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
new file mode 100644
index 0000000000..63b6eb6839
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
new file mode 100644
index 0000000000..d4162c19e0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate_project/>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
new file mode 100644
index 0000000000..2e09d15a60
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
new file mode 100644
index 0000000000..c26f7b61a8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate/>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl
new file mode 100644
index 0000000000..6489f6e1b5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl
@@ -0,0 +1 @@
+{"id": "1"}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl
new file mode 100644
index 0000000000..9e5822a9e8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl
@@ -0,0 +1 @@
+<id>1</id>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl
new file mode 100644
index 0000000000..5e2be031cb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl
new file mode 100644
index 0000000000..d5222f9e8f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl
@@ -0,0 +1,4 @@
+<network>
+ <label>new net 111</label>
+ <cidr>10.20.105.0/24</cidr>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl
new file mode 100644
index 0000000000..e178ab50cb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl
@@ -0,0 +1,32 @@
+{
+ "network": {
+ "bridge": null,
+ "vpn_public_port": null,
+ "dhcp_start": "%(ip)s",
+ "bridge_interface": null,
+ "updated_at": null,
+ "id": "%(id)s",
+ "cidr_v6": null,
+ "deleted_at": null,
+ "gateway": "%(ip)s",
+ "rxtx_base": null,
+ "label": "new net 111",
+ "priority": null,
+ "project_id": null,
+ "vpn_private_address": null,
+ "deleted": null,
+ "vlan": null,
+ "broadcast": "%(ip)s",
+ "netmask": "%(ip)s",
+ "injected": null,
+ "cidr": "10.20.105.0/24",
+ "vpn_public_address": null,
+ "multi_host": null,
+ "dns2": null,
+ "created_at": null,
+ "host": null,
+ "gateway_v6": null,
+ "netmask_v6": null,
+ "dns1": null
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl
new file mode 100644
index 0000000000..d709952cda
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl
@@ -0,0 +1,30 @@
+<network>
+ <bridge>None</bridge>
+ <vpn_public_port>None</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>None</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>new net 111</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>None</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>None</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>None</injected>
+ <cidr>10.20.105.0/24</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>None</multi_host>
+ <dns2>None</dns2>
+ <created_at>None</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl
new file mode 100644
index 0000000000..66e7122105
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl
@@ -0,0 +1,33 @@
+{
+ "network":
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl
new file mode 100644
index 0000000000..aeab222391
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(xmltime)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
new file mode 100644
index 0000000000..df99b889c4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
@@ -0,0 +1 @@
+{"disassociate": null}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
new file mode 100644
index 0000000000..63c0300904
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate>None</disassociate>
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl
new file mode 100644
index 0000000000..4e359c6171
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl
@@ -0,0 +1,64 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": null,
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
new file mode 100644
index 0000000000..7ac19a8137
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
@@ -0,0 +1,63 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<networks>
+ <network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(xmltime)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+ <network>
+ <bridge>br101</bridge>
+ <vpn_public_port>1001</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_1</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>101</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.10/29</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(xmltime)s</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+</networks>
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..cd7fdcf2d3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "%(ip)s",
+ "accessIPv6": "%(ip6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta var": "meta val"
+ },
+ "name": "%(name)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl
new file mode 100644
index 0000000000..254745649c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ id="%(uuid)s"
+ tenantId="openstack" userId="fake"
+ name="%(name)s"
+ hostId="%(hostid)s" progress="0"
+ status="ACTIVE" adminPass="%(password)s"
+ created="%(isotime)s"
+ updated="%(isotime)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s">
+ <image id="%(uuid)s">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/images/%(uuid)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/flavors/1"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link
+ rel="self"
+ href="%(host)s/v2/openstack/servers/%(uuid)s"/>
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/servers/%(uuid)s"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..2f06fd7008
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl
@@ -0,0 +1,19 @@
+{
+ "rebuild" : {
+ "imageRef" : "%(host)s/v2/32278/images/%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "accessIPv4" : "%(ip)s",
+ "accessIPv6" : "%(ip6)s",
+ "metadata" : {
+ "meta var" : "meta val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "preserve_ephemeral": %(preserve_ephemeral)s
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl
new file mode 100644
index 0000000000..6d469d40ea
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="%(name)s"
+ imageRef="%(host)s/v1.1/32278/images/%(uuid)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s"
+ adminPass="%(pass)s"
+ preserve_ephemeral="%(preserve_ephemeral)s">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</rebuild>
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
new file mode 100644
index 0000000000..f9a94e760a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "quota_class_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "%(set_id)s",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..fb8e7992a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set id="%(set_id)s">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
new file mode 100644
index 0000000000..483fda8c53
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_class_set": {
+ "instances": 50,
+ "cores": 50,
+ "ram": 51200,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "metadata_items": 128,
+ "injected_files": 5,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "security_groups": 10,
+ "security_group_rules": 20,
+ "key_pairs": 100
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
new file mode 100644
index 0000000000..150fb6a42a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
new file mode 100644
index 0000000000..c36783f2f0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_class_set": {
+ "cores": 50,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 50,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..cd674a24da
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
new file mode 100644
index 0000000000..2f0fd98572
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
new file mode 100644
index 0000000000..f56987563c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
new file mode 100644
index 0000000000..2f0fd98572
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..f56987563c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..1f12caa045
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
new file mode 100644
index 0000000000..596ce56ac3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..34df1fe01e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..91ac3a0dda
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl
new file mode 100644
index 0000000000..d9a355319e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl
@@ -0,0 +1,53 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "status": "%(status)s",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl
new file mode 100644
index 0000000000..5b134dcee0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="%(status)s" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
new file mode 100644
index 0000000000..5a017d8da1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "%(status)s",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl
new file mode 100644
index 0000000000..145dd0be28
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="%(status)s" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl
new file mode 100644
index 0000000000..d712347537
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "rescue": {
+ "adminPass": "%(password)s"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl
new file mode 100644
index 0000000000..09acae072a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rescue xmlns="http://docs.openstack.org/compute/api/v1.1"
+  adminPass="%(password)s"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl
new file mode 100644
index 0000000000..1922e4db1b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl
new file mode 100644
index 0000000000..b3b95fdde4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<adminPass>%(password)s</adminPass>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl
new file mode 100644
index 0000000000..cafc9b13a8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unrescue": null
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl
new file mode 100644
index 0000000000..6a87f8fb21
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<unrescue xmlns="http://docs.openstack.org/compute/api/v1.1"/>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
new file mode 100644
index 0000000000..8836d0eecc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "security_group_default_rule": {
+ "ip_protocol": "TCP",
+ "from_port": "80",
+ "to_port": "80",
+ "cidr": "10.10.10.0/24"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
new file mode 100644
index 0000000000..daee122905
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule>
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
new file mode 100644
index 0000000000..ae6c62bfd6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range":{
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
new file mode 100644
index 0000000000..9e700969ff
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
new file mode 100644
index 0000000000..c083640c3e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "security_group_default_rules": [
+ {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
new file mode 100644
index 0000000000..f009bf80f1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rules xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <security_group_default_rule id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+ </security_group_default_rule>
+</security_group_default_rules>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
new file mode 100644
index 0000000000..97b5259a18
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "id": 1,
+ "from_port": 80,
+ "to_port": 80,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ }
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
new file mode 100644
index 0000000000..9181abd387
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl
new file mode 100644
index 0000000000..41ae659135
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addSecurityGroup" : {
+ "name" : "%(group_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl
new file mode 100644
index 0000000000..7540245bc3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<addSecurityGroup>
+ <name>%(group_name)s</name>
+</addSecurityGroup>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl
new file mode 100644
index 0000000000..3f54ab6856
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "security_group": {
+ "name": "%(group_name)s",
+ "description": "description"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl
new file mode 100644
index 0000000000..c62b14c495
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<security_group name="%(group_name)s">
+ <description>
+ description
+ </description>
+</security_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
new file mode 100644
index 0000000000..a3f545785f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeSecurityGroup" : {
+ "name" : "%(group_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl
new file mode 100644
index 0000000000..9a64a5debc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<removeSecurityGroup>
+ <name>%(group_name)s</name>
+</removeSecurityGroup>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl
new file mode 100644
index 0000000000..b9325e2e7a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "description",
+ "id": 2,
+ "name": "%(group_name)s",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl
new file mode 100644
index 0000000000..c641fd60fc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group xmlns="http://docs.openstack.org/compute/api/v1.1" tenant_id="openstack" id="2" name="%(group_name)s">
+ <description>
+ description
+ </description>
+ <rules/>
+</security_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl
new file mode 100644
index 0000000000..0372512744
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl
new file mode 100644
index 0000000000..2b19797101
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group xmlns="http://docs.openstack.org/compute/api/v1.1" tenant_id="openstack" id="1" name="default">
+ <description>default</description>
+ <rules/>
+</security_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl
new file mode 100644
index 0000000000..8f6e201bdd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_groups xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <security_group tenant_id="openstack" id="1" name="default">
+ <description>default</description>
+ <rules/>
+ </security_group>
+</security_groups>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl
new file mode 100644
index 0000000000..4f0444219c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/ openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl
new file mode 100644
index 0000000000..2133d3f890
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..1ca430955b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+ <security_groups>
+ <security_group name="default"/>
+ </security_groups>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl
new file mode 100644
index 0000000000..8f6e201bdd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_groups xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <security_group tenant_id="openstack" id="1" name="default">
+ <description>default</description>
+ <rules/>
+ </security_group>
+</security_groups>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
new file mode 100644
index 0000000000..1afedaee9c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "cpu0_time": 17300000000,
+ "memory": 524288,
+ "vda_errors": -1,
+ "vda_read": 262144,
+ "vda_read_req": 112,
+ "vda_write": 5778432,
+ "vda_write_req": 488,
+ "vnet1_rx": 2070139,
+ "vnet1_rx_drop": 0,
+ "vnet1_rx_errors": 0,
+ "vnet1_rx_packets": 26701,
+ "vnet1_tx": 140208,
+ "vnet1_tx_drop": 0,
+ "vnet1_tx_errors": 0,
+ "vnet1_tx_packets": 662
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl
new file mode 100644
index 0000000000..776419f82f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl
@@ -0,0 +1,18 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<diagnostics xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <memory>524288</memory>
+ <vda_write>5778432</vda_write>
+ <vnet1_rx_packets>26701</vnet1_rx_packets>
+ <vnet1_rx_drop>0</vnet1_rx_drop>
+ <vnet1_tx_drop>0</vnet1_tx_drop>
+ <vnet1_rx>2070139</vnet1_rx>
+ <vda_read>262144</vda_read>
+ <vda_write_req>488</vda_write_req>
+ <vnet1_tx>140208</vnet1_tx>
+ <vnet1_tx_errors>0</vnet1_tx_errors>
+ <vnet1_tx_packets>662</vnet1_tx_packets>
+ <vnet1_rx_errors>0</vnet1_rx_errors>
+ <cpu0_time>17300000000</cpu0_time>
+ <vda_read_req>112</vda_read_req>
+ <vda_errors>-1</vda_errors>
+</diagnostics>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl
new file mode 100644
index 0000000000..43c3b6b407
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "events": [
+ {
+ "name": "%(name)s",
+ "tag": "%(tag)s",
+ "status": "%(status)s",
+ "server_uuid": "%(uuid)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl
new file mode 100644
index 0000000000..a9029857cf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <events>
+ <event>
+ <name>%(name)s</name>
+ <tag>%(tag)s</tag>
+ <status>%(status)s</status>
+ <server_uuid>%(uuid)s</server_uuid>
+ </event>
+ </events>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl
new file mode 100644
index 0000000000..aa11b62c83
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "events": [
+ {
+ "code": 200,
+ "name": "%(name)s",
+ "server_uuid": "%(uuid)s",
+ "status": "%(status)s",
+ "tag": "%(tag)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl
new file mode 100644
index 0000000000..24cf59ccba
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<events>
+ <event>
+ <status>%(status)s</status>
+ <tag>%(tag)s</tag>
+ <name>%(name)s</name>
+ <server_uuid>%(uuid)s</server_uuid>
+ <code>200</code>
+ </event>
+</events>
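
The four os-server-external-events templates above pair a request body (a list of events, each with name/tag/status/server_uuid) with a response that echoes each event plus a per-event result code (200 in the sample). A small sketch of building such a request body with the standard library; the concrete values are assumptions for illustration, since the templates leave them as placeholders:

    import json

    # Field names mirror event-create-req.json.tpl; values are illustrative only.
    event = {
        'name': 'network-changed',
        'tag': 'foo',
        'status': 'completed',
        'server_uuid': '3df201cf-2451-44f2-8d25-a4ca826fc1f3',
    }

    # JSON body for a POST to the os-server-external-events resource; the
    # response template above shows the same event echoed back with a "code".
    body = json.dumps({'events': [event]})
    print(body)
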
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl
new file mode 100644
index 0000000000..939bbd7cd8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl
@@ -0,0 +1,87 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "maxSecurityGroups": 10,
+ "maxSecurityGroupRules": 20,
+ "maxServerGroups": 10,
+ "maxServerGroupMembers": 10
+ },
+ "rate": [
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "PUT"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "DELETE"
+ }
+ ],
+ "regex": ".*",
+ "uri": "*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ }
+ ],
+ "regex": "^/servers",
+ "uri": "*/servers"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "GET"
+ }
+ ],
+ "regex": ".*changes-since.*",
+ "uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 12,
+ "unit": "MINUTE",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl
new file mode 100644
index 0000000000..91fd5e0b9f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl
@@ -0,0 +1,34 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<limits xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <rates>
+ <rate regex=".*" uri="*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="PUT" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="DELETE" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/servers" uri="*/servers">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ </rate>
+ <rate regex=".*changes-since.*" uri="*changes-since*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="12" value="12"/>
+ </rate>
+ </rates>
+ <absolute>
+ <limit name="maxServerMeta" value="128"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="maxPersonality" value="5"/>
+ <limit name="maxImageMeta" value="128"/>
+ <limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
+ <limit name="maxTotalKeypairs" value="100"/>
+ <limit name="maxSecurityGroups" value="10"/>
+ <limit name="maxTotalCores" value="20"/>
+ <limit name="maxTotalFloatingIps" value="10"/>
+ <limit name="maxTotalRAMSize" value="51200"/>
+ <limit name="maxServerGroups" value="10"/>
+ <limit name="maxServerGroupMembers" value="10"/>
+ </absolute>
+</limits>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl
new file mode 100644
index 0000000000..06a007e5e7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_class_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "%(set_id)s",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..5951360f60
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set id="%(set_id)s">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl
new file mode 100644
index 0000000000..32df1e4b2b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl
@@ -0,0 +1,18 @@
+{
+ "quota_class_set": {
+ "instances": 50,
+ "cores": 50,
+ "ram": 51200,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "metadata_items": 128,
+ "injected_files": 5,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "security_groups": 10,
+ "security_group_rules": 20,
+ "key_pairs": 100,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl
new file mode 100644
index 0000000000..e32e3d44c1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl
new file mode 100644
index 0000000000..c08c585df8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "quota_class_set": {
+ "cores": 50,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 50,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..d0c34e50a6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_class_set>
+ <cores>50</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>50</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_class_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl
new file mode 100644
index 0000000000..f66f22cd2d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl
new file mode 100644
index 0000000000..e6076286ca
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl
new file mode 100644
index 0000000000..f66f22cd2d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..e6076286ca
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..1f12caa045
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl
new file mode 100644
index 0000000000..596ce56ac3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <security_groups>45</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..605857f39e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..dfaddfd969
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>45</security_groups>
+ <server_groups>10</server_groups>
+ <server_group_members>10</server_group_members>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl
new file mode 100644
index 0000000000..3bd6b42432
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl
@@ -0,0 +1,93 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxSecurityGroupRules": 20,
+ "maxSecurityGroups": 10,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "maxServerGroups": 10,
+ "maxServerGroupMembers": 10,
+ "totalCoresUsed": 0,
+ "totalInstancesUsed": 0,
+ "totalRAMUsed": 0,
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0,
+ "totalServerGroupsUsed": 0
+ },
+ "rate": [
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "PUT"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "DELETE"
+ }
+ ],
+ "regex": ".*",
+ "uri": "*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ }
+ ],
+ "regex": "^/servers",
+ "uri": "*/servers"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "GET"
+ }
+ ],
+ "regex": ".*changes-since.*",
+ "uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 12,
+ "unit": "MINUTE",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl
new file mode 100644
index 0000000000..4b4ea91539
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl
@@ -0,0 +1,40 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<limits xmlns:os-used-limits="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <rates>
+ <rate regex=".*" uri="*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="PUT" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="DELETE" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/servers" uri="*/servers">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ </rate>
+ <rate regex=".*changes-since.*" uri="*changes-since*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="12" value="12"/>
+ </rate>
+ </rates>
+ <absolute>
+ <limit name="maxServerMeta" value="128"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="maxPersonality" value="5"/>
+ <limit name="maxImageMeta" value="128"/>
+ <limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
+ <limit name="maxTotalKeypairs" value="100"/>
+ <limit name="totalCoresUsed" value="0"/>
+ <limit name="totalRAMUsed" value="0"/>
+ <limit name="totalInstancesUsed" value="0"/>
+ <limit name="maxSecurityGroups" value="10"/>
+ <limit name="maxTotalCores" value="20"/>
+ <limit name="totalSecurityGroupsUsed" value="0"/>
+ <limit name="maxTotalFloatingIps" value="10"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
+ <limit name="maxTotalRAMSize" value="51200"/>
+ <limit name="maxServerGroups" value="10"/>
+ <limit name="totalServerGroupsUsed" value="0"/>
+ <limit name="maxServerGroupMembers" value="10"/>
+ </absolute>
+</limits>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl
new file mode 100644
index 0000000000..ba72643b6d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "server_group": {
+ "id": "%(id)s",
+ "name": "%(name)s",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl
new file mode 100644
index 0000000000..dc4651aab7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server_group xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" name="test">
+ <policies>
+ <policy>anti-affinity</policy>
+ </policies>
+ <members/>
+ <metadata/>
+</server_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl
new file mode 100644
index 0000000000..f01d451dd2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "server_groups": [
+ {
+ "id": "%(id)s",
+ "name": "test",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl
new file mode 100644
index 0000000000..bda7562118
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server_groups xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server_group id="%(id)s" name="test">
+ <policies>
+ <policy>anti-affinity</policy>
+ </policies>
+ <members/>
+ <metadata/>
+</server_group>
+</server_groups>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl
new file mode 100644
index 0000000000..1cc2328320
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "server_group": {
+ "name": "%(name)s",
+ "policies": ["anti-affinity"]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl
new file mode 100644
index 0000000000..abe8459549
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<server_group name="test">
+ <policies>
+ <policy>anti-affinity</policy>
+ </policies>
+</server_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl
new file mode 100644
index 0000000000..ee9c37e82c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server_group": {
+ "id": "%(id)s",
+ "name": "%(name)s",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl
new file mode 100644
index 0000000000..dc4651aab7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server_group xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" name="test">
+ <policies>
+ <policy>anti-affinity</policy>
+ </policies>
+ <members/>
+ <metadata/>
+</server_group>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8b97dc28d7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
new file mode 100644
index 0000000000..03bee03a6e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl
new file mode 100644
index 0000000000..026f15d46a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "password": "%(encrypted_password)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
new file mode 100644
index 0000000000..046eed30fb
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<password>%(encrypted_password)s</password>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl
new file mode 100644
index 0000000000..a993b3f684
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s" : null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl
new file mode 100644
index 0000000000..35cc3c2045
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<%(action)s/>
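
For the os-server-start-stop templates just above, the single `%(action)s` placeholder expands to the action key itself, so the JSON body becomes e.g. `{"os-start": null}` or `{"os-stop": null}`. A tiny sketch of that expansion, with the action names assumed from the extension's conventional start/stop actions:

    import json

    # Reproduce server_start_stop.json.tpl with the placeholder filled in.
    template = '{\n    "%(action)s" : null\n}'
    for action in ('os-start', 'os-stop'):
        body = json.loads(template % {'action': action})
        print(body)  # {'os-start': None}, then {'os-stop': None}
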
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl
new file mode 100644
index 0000000000..13ba2f11ca
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "host": "%(host)s",
+ "binary": "%(binary)s",
+ "disabled_reason": "%(disabled_reason)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl
new file mode 100644
index 0000000000..a1ffd7e205
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service host="%(host)s" binary="%(binary)s" disabled_reason="%(disabled_reason)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl
new file mode 100644
index 0000000000..5266b0b623
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "service": {
+ "binary": "%(binary)s",
+ "host": "%(host)s",
+ "disabled_reason": "%(disabled_reason)s",
+ "status": "disabled"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl
new file mode 100644
index 0000000000..f7255d3851
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service status="disabled" binary="%(binary)s" host="%(host)s" disabled_reason="%(disabled_reason)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl
new file mode 100644
index 0000000000..57182e935c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host)s",
+ "binary": "%(binary)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl
new file mode 100644
index 0000000000..fc297bcd34
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service host="%(host)s" binary="%(binary)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
new file mode 100644
index 0000000000..47a8b3d816
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "service": {
+ "host": "%(host)s",
+ "binary": "%(binary)s",
+ "status": "disabled"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl
new file mode 100644
index 0000000000..cc03298c53
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service host="%(host)s" binary="%(binary)s" status="disabled" />
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl
new file mode 100644
index 0000000000..57182e935c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host)s",
+ "binary": "%(binary)s"
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl
new file mode 100644
index 0000000000..fc297bcd34
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service host="%(host)s" binary="%(binary)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
new file mode 100644
index 0000000000..24f72311d1
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "service": {
+ "host": "%(host)s",
+ "binary": "%(binary)s",
+ "status": "enabled"
+ }
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl
new file mode 100644
index 0000000000..3cbf51b778
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<service host="%(host)s" binary="%(binary)s" status="enabled" />
diff --git a/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl
new file mode 100644
index 0000000000..80be294dd6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl
@@ -0,0 +1,40 @@
+{
+ "services": [
+ {
+ "binary": "nova-scheduler",
+ "host": "host1",
+ "disabled_reason": "test1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "disabled_reason": "test2",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ },
+ {
+ "binary": "nova-scheduler",
+ "host": "host2",
+ "disabled_reason": null,
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "disabled_reason": "test4",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl
new file mode 100644
index 0000000000..365f02e573
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<services>
+ <service status="disabled" binary="nova-scheduler" zone="internal" state="up" updated_at="%(xmltime)s" host="host1" disabled_reason="test1"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="up" updated_at="%(xmltime)s" host="host1" disabled_reason="test2"/>
+ <service status="enabled" binary="nova-scheduler" zone="internal" state="down" updated_at="%(xmltime)s" host="host2" disabled_reason="None"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="down" updated_at="%(xmltime)s" host="host2" disabled_reason="test4"/>
+</services>
diff --git a/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl
new file mode 100644
index 0000000000..09564520e7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "services": [
+ {
+ "binary": "nova-scheduler",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ },
+ {
+ "binary": "nova-scheduler",
+ "host": "host2",
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl
new file mode 100644
index 0000000000..1c6b0113b7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl
@@ -0,0 +1,6 @@
+<services>
+ <service status="disabled" binary="nova-scheduler" zone="internal" state="up" host="host1" updated_at="%(xmltime)s"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="up" host="host1" updated_at="%(xmltime)s" />
+ <service status="enabled" binary="nova-scheduler" zone="internal" state="down" host="host2" updated_at="%(xmltime)s"/>
+ <service status="disabled" binary="nova-compute" zone="nova" state="down" host="host2" updated_at="%(xmltime)s"/>
+</services>
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl
new file mode 100644
index 0000000000..41d18bdac0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <%(action)s/>
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl
new file mode 100644
index 0000000000..41d18bdac0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <%(action)s/>
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl
new file mode 100644
index 0000000000..41d18bdac0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <%(action)s/>
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
new file mode 100644
index 0000000000..f37083013d
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
@@ -0,0 +1,27 @@
+{
+ "tenant_usage": {
+ "server_usages": [
+ {
+ "ended_at": null,
+ "flavor": "m1.tiny",
+ "hours": 1.0,
+ "instance_id": "%(uuid)s",
+ "local_gb": 1,
+ "memory_mb": 512,
+ "name": "new-server-test",
+ "started_at": "%(strtime)s",
+ "state": "active",
+ "tenant_id": "openstack",
+ "uptime": 3600,
+ "vcpus": 1
+ }
+ ],
+ "start": "%(strtime)s",
+ "stop": "%(strtime)s",
+ "tenant_id": "openstack",
+ "total_hours": 1.0,
+ "total_local_gb_usage": 1.0,
+ "total_memory_mb_usage": 512.0,
+ "total_vcpus_usage": 1.0
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl
new file mode 100644
index 0000000000..014c2f9d64
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl
@@ -0,0 +1,26 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<tenant_usage>
+ <tenant_id>openstack</tenant_id>
+ <total_local_gb_usage>1.0</total_local_gb_usage>
+ <total_vcpus_usage>1.0</total_vcpus_usage>
+ <total_memory_mb_usage>512.0</total_memory_mb_usage>
+ <total_hours>1.0</total_hours>
+ <start>%(xmltime)s</start>
+ <stop>%(xmltime)s</stop>
+ <server_usages>
+ <server_usage>
+ <instance_id>%(uuid)s</instance_id>
+ <name>new-server-test</name>
+ <hours>1.0</hours>
+ <memory_mb>512</memory_mb>
+ <local_gb>1</local_gb>
+ <vcpus>1</vcpus>
+ <tenant_id>openstack</tenant_id>
+ <flavor>m1.tiny</flavor>
+ <started_at>%(xmltime)s</started_at>
+ <ended_at>None</ended_at>
+ <state>active</state>
+ <uptime>3600</uptime>
+ </server_usage>
+ </server_usages>
+</tenant_usage>
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
new file mode 100644
index 0000000000..25b5ff2b84
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
@@ -0,0 +1,13 @@
+{
+ "tenant_usages": [
+ {
+ "start": "%(strtime)s",
+ "stop": "%(strtime)s",
+ "tenant_id": "openstack",
+ "total_hours": 1.0,
+ "total_local_gb_usage": 1.0,
+ "total_memory_mb_usage": 512.0,
+ "total_vcpus_usage": 1.0
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl
new file mode 100644
index 0000000000..b1bb63f1c4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<tenant_usages>
+ <tenant_usage>
+ <tenant_id>openstack</tenant_id>
+ <total_local_gb_usage>1.0</total_local_gb_usage>
+ <total_vcpus_usage>1.0</total_vcpus_usage>
+ <total_memory_mb_usage>512.0</total_memory_mb_usage>
+ <total_hours>1.0</total_hours>
+ <start>%(xmltime)s</start>
+ <stop>%(xmltime)s</stop>
+ <server_usages/>
+ </tenant_usage>
+</tenant_usages>
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
new file mode 100644
index 0000000000..757084d2f3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "%(id)s",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "%(id)s",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl
new file mode 100644
index 0000000000..0562ebae7c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<networks>
+ <network>
+ <cidr>10.0.0.0/29</cidr>
+ <id>%(id)s</id>
+ <label>test_0</label>
+ </network>
+ <network>
+ <cidr>10.0.0.8/29</cidr>
+ <id>%(id)s</id>
+ <label>test_1</label>
+ </network>
+</networks>
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
new file mode 100644
index 0000000000..fb1c2d3d06
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "network": {
+ "label": "public",
+ "cidr": "172.0.0.0/24",
+ "vlan_start": 1,
+ "num_networks": 1,
+ "network_size": 255
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl
new file mode 100644
index 0000000000..0493de3872
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl
@@ -0,0 +1,7 @@
+<network>
+ <label>public</label>
+ <cidr>172.0.0.0/24</cidr>
+ <vlan_start>1</vlan_start>
+ <num_networks>1</num_networks>
+ <network_size>255</network_size>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
new file mode 100644
index 0000000000..ff9e2273d3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "%(id)s",
+ "label": "public"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl
new file mode 100644
index 0000000000..9c6c2f28b2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl
@@ -0,0 +1,5 @@
+<network>
+ <cidr>172.0.0.0/24</cidr>
+ <id>%(id)s</id>
+ <label>public</label>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl
new file mode 100644
index 0000000000..dcf861c4ef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl
@@ -0,0 +1,90 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxSecurityGroupRules": 20,
+ "maxSecurityGroups": 10,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "totalCoresUsed": 0,
+ "totalInstancesUsed": 0,
+ "totalRAMUsed": 0,
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0
+ },
+ "rate": [
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "PUT"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "DELETE"
+ }
+ ],
+ "regex": ".*",
+ "uri": "*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ }
+ ],
+ "regex": "^/servers",
+ "uri": "*/servers"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "GET"
+ }
+ ],
+ "regex": ".*changes-since.*",
+ "uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 12,
+ "unit": "MINUTE",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl
new file mode 100644
index 0000000000..9f6a2d9f7a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl
@@ -0,0 +1,37 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<limits xmlns:os-used-limits="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <rates>
+ <rate regex=".*" uri="*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="PUT" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="DELETE" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/servers" uri="*/servers">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ </rate>
+ <rate regex=".*changes-since.*" uri="*changes-since*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="12" value="12"/>
+ </rate>
+ </rates>
+ <absolute>
+ <limit name="maxServerMeta" value="128"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="maxPersonality" value="5"/>
+ <limit name="maxImageMeta" value="128"/>
+ <limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
+ <limit name="maxTotalKeypairs" value="100"/>
+ <limit name="totalCoresUsed" value="0"/>
+ <limit name="totalRAMUsed" value="0"/>
+ <limit name="totalInstancesUsed" value="0"/>
+ <limit name="maxSecurityGroups" value="10"/>
+ <limit name="maxTotalCores" value="20"/>
+ <limit name="totalSecurityGroupsUsed" value="0"/>
+ <limit name="maxTotalFloatingIps" value="10"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
+ <limit name="maxTotalRAMSize" value="51200"/>
+ </absolute>
+</limits>
diff --git a/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
new file mode 100644
index 0000000000..dcf861c4ef
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
@@ -0,0 +1,90 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxSecurityGroupRules": 20,
+ "maxSecurityGroups": 10,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "totalCoresUsed": 0,
+ "totalInstancesUsed": 0,
+ "totalRAMUsed": 0,
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0
+ },
+ "rate": [
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "PUT"
+ },
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "DELETE"
+ }
+ ],
+ "regex": ".*",
+ "uri": "*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "POST"
+ }
+ ],
+ "regex": "^/servers",
+ "uri": "*/servers"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 120,
+ "unit": "MINUTE",
+ "value": 120,
+ "verb": "GET"
+ }
+ ],
+ "regex": ".*changes-since.*",
+ "uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(isotime)s",
+ "remaining": 12,
+ "unit": "MINUTE",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
new file mode 100644
index 0000000000..9f6a2d9f7a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
@@ -0,0 +1,37 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<limits xmlns:os-used-limits="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <rates>
+ <rate regex=".*" uri="*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="PUT" remaining="120" value="120"/>
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="DELETE" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/servers" uri="*/servers">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="POST" remaining="120" value="120"/>
+ </rate>
+ <rate regex=".*changes-since.*" uri="*changes-since*">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="120" value="120"/>
+ </rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(isotime)s" unit="MINUTE" verb="GET" remaining="12" value="12"/>
+ </rate>
+ </rates>
+ <absolute>
+ <limit name="maxServerMeta" value="128"/>
+ <limit name="maxTotalInstances" value="10"/>
+ <limit name="maxPersonality" value="5"/>
+ <limit name="maxImageMeta" value="128"/>
+ <limit name="maxPersonalitySize" value="10240"/>
+ <limit name="maxSecurityGroupRules" value="20"/>
+ <limit name="maxTotalKeypairs" value="100"/>
+ <limit name="totalCoresUsed" value="0"/>
+ <limit name="totalRAMUsed" value="0"/>
+ <limit name="totalInstancesUsed" value="0"/>
+ <limit name="maxSecurityGroups" value="10"/>
+ <limit name="maxTotalCores" value="20"/>
+ <limit name="totalSecurityGroupsUsed" value="0"/>
+ <limit name="maxTotalFloatingIps" value="10"/>
+ <limit name="totalFloatingIpsUsed" value="0"/>
+ <limit name="maxTotalRAMSize" value="51200"/>
+ </absolute>
+</limits>
diff --git a/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl
new file mode 100644
index 0000000000..fb7744a3d0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "user_data" : "%(user_data)s",
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl
new file mode 100644
index 0000000000..22ec4d5c5f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%(host)s/openstack/images/%(image_id)s"
+ flavorRef="%(host)s/openstack/flavors/1"
+ name="new-server-test">
+ <user_data>
+ %(user_data)s
+ </user_data>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl
new file mode 100644
index 0000000000..2ad5c102b0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl
new file mode 100644
index 0000000000..2f0fd98572
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl
new file mode 100644
index 0000000000..f56987563c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>10</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..b322b2a870
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "quota_set": {
+ "force": "True",
+ "instances": 9
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl
new file mode 100644
index 0000000000..c5084d44e6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set id="fake_tenant">
+ <force>True</force>
+ <instances>9</instances>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..5539332927
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 9,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl
new file mode 100644
index 0000000000..43c36c7da3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<quota_set>
+ <cores>20</cores>
+ <floating_ips>10</floating_ips>
+ <fixed_ips>-1</fixed_ips>
+ <injected_file_content_bytes>10240</injected_file_content_bytes>
+ <injected_file_path_bytes>255</injected_file_path_bytes>
+ <injected_files>5</injected_files>
+ <instances>9</instances>
+ <key_pairs>100</key_pairs>
+ <metadata_items>128</metadata_items>
+ <ram>51200</ram>
+ <security_group_rules>20</security_group_rules>
+ <security_groups>10</security_groups>
+</quota_set>
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl
new file mode 100644
index 0000000000..af0b7e05a7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "virtual_interfaces": [
+ {
+ "id": "%(id)s",
+ "mac_address": "%(mac_addr)s"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl
new file mode 100644
index 0000000000..74d0c6f394
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<virtual_interfaces xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <virtual_interface id="%(id)s" mac_address="%(mac_addr)s"/>
+</virtual_interfaces>
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl
new file mode 100644
index 0000000000..3d360a57bc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "volumeAttachment": {
+ "volumeId": "%(volume_id)s",
+ "device": "%(device)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl
new file mode 100644
index 0000000000..ffb20ad1ea
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<volumeAttachment volumeId="%(volume_id)s" device="%(device)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl
new file mode 100644
index 0000000000..3d360a57bc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "volumeAttachment": {
+ "volumeId": "%(volume_id)s",
+ "device": "%(device)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl
new file mode 100644
index 0000000000..ffb20ad1ea
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<volumeAttachment volumeId="%(volume_id)s" device="%(device)s" />
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl
new file mode 100644
index 0000000000..4730b3c197
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "volumeAttachment": {
+ "device": "%(device)s",
+ "id": "%(volume_id)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(volume_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl
new file mode 100644
index 0000000000..efad2fd02a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volumeAttachment device="%(device)s" serverId="%(uuid)s" id="%(volume_id)s" volumeId="%(volume_id)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl
new file mode 100644
index 0000000000..6c1da07ef6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "volumeAttachments": [
+ {
+ "device": "/dev/sdd",
+ "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+ "serverId": "%(uuid)s",
+ "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ },
+ {
+ "device": "/dev/sdc",
+ "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
+ "serverId": "%(uuid)s",
+ "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl
new file mode 100644
index 0000000000..351646d81e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volumeAttachments>
+ <volumeAttachment device="/dev/sdd" serverId="%(uuid)s" id="a26887c6-c47b-4654-abb5-dfadf7d3f803" volumeId="a26887c6-c47b-4654-abb5-dfadf7d3f803"/>
+ <volumeAttachment device="/dev/sdc" serverId="%(uuid)s" id="a26887c6-c47b-4654-abb5-dfadf7d3f804" volumeId="a26887c6-c47b-4654-abb5-dfadf7d3f804"/>
+</volumeAttachments>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
new file mode 100644
index 0000000000..82a63eda5f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "volumes": [
+ {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl
new file mode 100644
index 0000000000..bd8f324ee0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volumes>
+ <volume status="in-use" displayDescription="%(volume_desc)s" availabilityZone="zone1:host1" displayName="%(volume_name)s" volumeType="Backup" snapshotId="None" id="%(uuid)s" createdAt="%(xmltime)s" size="100">
+ <attachments>
+ <attachment device="/" serverId="%(uuid)s" id="%(uuid)s" volumeId="%(uuid)s"/>
+ </attachments>
+ <metadata/>
+ </volume>
+</volumes>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl
new file mode 100644
index 0000000000..84bfdd2a5b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "volume": {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl
new file mode 100644
index 0000000000..cb3c5edf90
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volume status="in-use" displayDescription="%(volume_desc)s" availabilityZone="zone1:host1" displayName="%(volume_name)s" volumeType="Backup" snapshotId="None" id="%(uuid)s" createdAt="%(xmltime)s" size="100">
+ <attachments>
+ <attachment device="/" serverId="%(uuid)s" id="%(uuid)s" volumeId="%(uuid)s"/>
+ </attachments>
+ <metadata/>
+</volume>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl
new file mode 100644
index 0000000000..82a63eda5f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "volumes": [
+ {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl
new file mode 100644
index 0000000000..bd8f324ee0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volumes>
+ <volume status="in-use" displayDescription="%(volume_desc)s" availabilityZone="zone1:host1" displayName="%(volume_name)s" volumeType="Backup" snapshotId="None" id="%(uuid)s" createdAt="%(xmltime)s" size="100">
+ <attachments>
+ <attachment device="/" serverId="%(uuid)s" id="%(uuid)s" volumeId="%(uuid)s"/>
+ </attachments>
+ <metadata/>
+ </volume>
+</volumes>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl
new file mode 100644
index 0000000000..db7fbff4d4
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "volume":
+ {
+ "availability_zone": "zone1:host1",
+ "display_name": "%(volume_name)s",
+ "display_description": "%(volume_desc)s",
+ "size": 100
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl
new file mode 100644
index 0000000000..bb115cc61b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+ <volume displayDescription="%(volume_desc)s" availabilityZone="zone1:host1" displayName="%(volume_name)s" size="100"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl
new file mode 100644
index 0000000000..d13ce20cc3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl
@@ -0,0 +1,21 @@
+{
+ "volume": {
+ "status": "in-use",
+ "displayDescription": "%(volume_desc)s",
+ "availabilityZone": "zone1:host1",
+ "displayName": "%(volume_name)s",
+ "attachments": [
+ { "device": "/",
+ "serverId": "%(uuid)s",
+ "id": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "volumeType": "Backup",
+ "snapshotId": null,
+ "metadata": {},
+ "id": "%(uuid)s",
+ "createdAt": "%(strtime)s",
+ "size": 100
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl
new file mode 100644
index 0000000000..cb3c5edf90
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volume status="in-use" displayDescription="%(volume_desc)s" availabilityZone="zone1:host1" displayName="%(volume_name)s" volumeType="Backup" snapshotId="None" id="%(uuid)s" createdAt="%(xmltime)s" size="100">
+ <attachments>
+ <attachment device="/" serverId="%(uuid)s" id="%(uuid)s" volumeId="%(uuid)s"/>
+ </attachments>
+ <metadata/>
+</volume>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl
new file mode 100644
index 0000000000..a8d47ea031
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "snapshot": {
+ "display_name": "%(snapshot_name)s",
+ "display_description": "%(description)s",
+ "volume_id": "%(volume_id)s",
+ "force": false
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl
new file mode 100644
index 0000000000..a5b670bc2f
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <snapshot>
+ <display_name>%(snapshot_name)s</display_name>
+ <display_description>%(description)s</display_description>
+ <volume_id>%(volume_id)s</volume_id>
+ <force>false</force>
+ </snapshot>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl
new file mode 100644
index 0000000000..6153e8140e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "snapshot": {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(description)s",
+ "displayName": "%(snapshot_name)s",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": "%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl
new file mode 100644
index 0000000000..78268c822c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<snapshot status="available" displayDescription="%(description)s" displayName="%(snapshot_name)s" volumeId="521752a6-acf6-4b2d-bc7a-119f9148cd8c" id="100" createdAt="%(xmltime)s" size="100"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl
new file mode 100644
index 0000000000..1b509d54f8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "snapshots": [
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 101,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 102,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl
new file mode 100644
index 0000000000..730921f4cf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<snapshots>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="100" createdAt="%(xmltime)s" size="100"/>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="101" createdAt="%(xmltime)s" size="100"/>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="102" createdAt="%(xmltime)s" size="100"/>
+</snapshots>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl
new file mode 100644
index 0000000000..c65d073ad7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "snapshots": [
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 101,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 102,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl
new file mode 100644
index 0000000000..730921f4cf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<snapshots>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="100" createdAt="%(xmltime)s" size="100"/>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="101" createdAt="%(xmltime)s" size="100"/>
+ <snapshot status="available" displayDescription="%(text)s" displayName="%(text)s" volumeId="12" id="102" createdAt="%(xmltime)s" size="100"/>
+</snapshots>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl
new file mode 100644
index 0000000000..a9ab6240d6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "snapshot": {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(description)s",
+ "displayName": "%(snapshot_name)s",
+ "id": "100",
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl
new file mode 100644
index 0000000000..c42bf41b3c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<snapshot status="available" displayDescription="%(description)s" displayName="%(snapshot_name)s" volumeId="12" id="100" createdAt="%(xmltime)s" size="100"/>
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl
new file mode 100644
index 0000000000..86099eeb87
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "volumeAttachment": {
+ "device": "/dev/sdd",
+ "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+ "serverId": "%(uuid)s",
+ "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl
new file mode 100644
index 0000000000..45fd199793
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<volumeAttachment device="/dev/sdd" serverId="%(uuid)s" id="a26887c6-c47b-4654-abb5-dfadf7d3f803" volumeId="a26887c6-c47b-4654-abb5-dfadf7d3f803"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl
new file mode 100644
index 0000000000..da615718fe
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl
@@ -0,0 +1,5 @@
+{
+ "changePassword" : {
+ "adminPass" : "%(password)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl
new file mode 100644
index 0000000000..6c343024e2
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<changePassword
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ adminPass="%(password)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl
new file mode 100644
index 0000000000..432f6126e9
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "confirmResize" : null
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl
new file mode 100644
index 0000000000..18f07bd67b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<confirmResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl
new file mode 100644
index 0000000000..0b9e39ffb3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl
@@ -0,0 +1,9 @@
+{
+ "createImage" : {
+ "name" : "%(name)s",
+ "metadata": {
+ "%(meta_var)s": "%(meta_val)s"
+ }
+ }
+}
+
diff --git a/nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl
new file mode 100644
index 0000000000..aa1eccf8a5
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<createImage
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="%(name)s">
+ <metadata>
+ <meta key="%(meta_var)s">%(meta_val)s</meta>
+ </metadata>
+</createImage>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl
new file mode 100644
index 0000000000..18eda9b9ab
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl
@@ -0,0 +1,5 @@
+{
+ "reboot" : {
+ "type" : "%(type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl
new file mode 100644
index 0000000000..d4cfe198c7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reboot
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ type="%(type)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..cd7fdcf2d3
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "%(ip)s",
+ "accessIPv6": "%(ip6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta var": "meta val"
+ },
+ "name": "%(name)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl
new file mode 100644
index 0000000000..254745649c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ id="%(uuid)s"
+ tenantId="openstack" userId="fake"
+ name="%(name)s"
+ hostId="%(hostid)s" progress="0"
+ status="ACTIVE" adminPass="%(password)s"
+ created="%(isotime)s"
+ updated="%(isotime)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s">
+ <image id="%(uuid)s">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/images/%(uuid)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/flavors/1"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link
+ rel="self"
+ href="%(host)s/v2/openstack/servers/%(uuid)s"/>
+ <atom:link
+ rel="bookmark"
+ href="%(host)s/openstack/servers/%(uuid)s"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..273906a349
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl
@@ -0,0 +1,18 @@
+{
+ "rebuild" : {
+ "imageRef" : "%(host)s/v2/32278/images/%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "accessIPv4" : "%(ip)s",
+ "accessIPv6" : "%(ip6)s",
+ "metadata" : {
+ "meta var" : "meta val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl
new file mode 100644
index 0000000000..84f0b98961
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rebuild
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="%(name)s"
+ imageRef="%(host)s/v1.1/32278/images/%(uuid)s"
+ accessIPv4="%(ip)s"
+ accessIPv6="%(ip6)s"
+ adminPass="%(pass)s">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</rebuild>
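The base64 block inside the personality element above is simply the injected file's contents (a Richard Bach quote), the same blob that appears space-separated in the JSON rebuild template; the line wrapping is only for readability. A small self-contained sketch of the same round trip, using a made-up banner file rather than the quote:

    import base64

    contents = base64.b64encode(b"Welcome to this hypothetical banner file.\n")
    # Wrap the blob the way the XML sample does; the whitespace carries no data.
    wrapped = b"\n".join(contents[i:i + 16] for i in range(0, len(contents), 16))
    decoded = base64.b64decode(wrapped.replace(b"\n", b""))
    assert decoded == b"Welcome to this hypothetical banner file.\n"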
diff --git a/nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl
new file mode 100644
index 0000000000..468a88da24
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl
@@ -0,0 +1,5 @@
+{
+ "resize" : {
+ "flavorRef" : "%(id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl
new file mode 100644
index 0000000000..cbe49ea59a
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<resize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ flavorRef="%(id)s"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl
new file mode 100644
index 0000000000..2ddf6e5ab0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "revertResize" : null
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl
new file mode 100644
index 0000000000..5c13bbdc0c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<revertResize
+ xmlns="http://docs.openstack.org/compute/api/v1.1"/>
diff --git a/nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl
new file mode 100644
index 0000000000..4ac6374529
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl
new file mode 100644
index 0000000000..cee28db35c
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl
new file mode 100644
index 0000000000..29d2370a74
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl
new file mode 100644
index 0000000000..153dca9b54
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network xmlns="http://docs.openstack.org/compute/api/v1.1" id="private">
+ <ip version="4" addr="%(ip)s"/>
+</network>
diff --git a/nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl
new file mode 100644
index 0000000000..259eabea72
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl
new file mode 100644
index 0000000000..62d804b2af
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<addresses xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+</addresses>
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl
new file mode 100644
index 0000000000..2278d2afd8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "metadata" : {
+ "foo" : "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl
new file mode 100644
index 0000000000..e742706736
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="foo">%(value)s</meta>
+</metadata>
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl
new file mode 100644
index 0000000000..2278d2afd8
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "metadata" : {
+ "foo" : "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl
new file mode 100644
index 0000000000..e742706736
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="foo">%(value)s</meta>
+</metadata>
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl
new file mode 100644
index 0000000000..35872e95fc
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta" : {
+ "foo" : "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl
new file mode 100644
index 0000000000..fa9d6ad480
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="foo">%(value)s</meta>
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl
new file mode 100644
index 0000000000..85d69ec956
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "foo": "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl
new file mode 100644
index 0000000000..fa9d6ad480
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="foo">%(value)s</meta>
diff --git a/nova/tests/unit/integrated/api_samples/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..81afe431c0
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl
new file mode 100644
index 0000000000..da0472dbcf
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(isotime)s" hostId="%(hostid)s" name="new-server-test" created="%(isotime)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8b97dc28d7
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl
new file mode 100644
index 0000000000..03bee03a6e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server name="new-server-test" id="%(id)s">
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl
new file mode 100644
index 0000000000..5c3b1ec05b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl
@@ -0,0 +1,26 @@
+{
+ "versions": [
+ {
+ "id": "v2.0",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/",
+ "rel": "self"
+ }
+ ],
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z"
+ },
+ {
+ "id": "v2.1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/",
+ "rel": "self"
+ }
+ ],
+ "status": "EXPERIMENTAL",
+ "updated": "2013-07-23T11:33:21Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl
new file mode 100644
index 0000000000..09c4a52f9e
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<versions xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/common/api/v1.0">
+ <version status="CURRENT" updated="2011-01-21T11:33:21Z" id="v2.0">
+ <atom:link href="http://openstack.example.com/v2/" rel="self"/>
+ </version>
+ <version status="EXPERIMENTAL" updated="2013-07-23T11:33:21Z" id="v2.1">
+ <atom:link href="http://openstack.example.com/v2/" rel="self"/>
+ </version>
+</versions>
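These version templates, like all the .tpl files above, are driven by api_samples_test_base.py, which this diff adds next. Its _verify_response() seeds a missing template from the live API response on the first run and reuses the stored template on later runs; in essence (a simplified sketch, not the actual method, which also manages the doc/api_samples copies):

    import os

    def load_or_seed_template(path, response_data):
        # Seed-then-compare: write the template if it does not exist yet,
        # otherwise read the stored one for comparison.
        if not os.path.exists(path):
            with open(path, 'w') as f:
                f.write(response_data)
            return response_data
        with open(path) as f:
            return f.read()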
diff --git a/nova/tests/unit/integrated/api_samples_test_base.py b/nova/tests/unit/integrated/api_samples_test_base.py
new file mode 100644
index 0000000000..69f5f3eb2b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples_test_base.py
@@ -0,0 +1,323 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+import six
+
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.integrated import integrated_helpers
+
+
+class NoMatch(test.TestingException):
+ pass
+
+
+class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
+ ctype = 'json'
+ all_extensions = False
+ extension_name = None
+
+ def _pretty_data(self, data):
+ if self.ctype == 'json':
+ data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
+ indent=4)
+
+ else:
+ if data is None:
+ # Likely from missing XML file.
+ return ""
+ xml = etree.XML(data)
+ data = etree.tostring(xml, encoding="UTF-8",
+ xml_declaration=True, pretty_print=True)
+ return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
+
+ def _objectify(self, data):
+ if not data:
+ return {}
+ if self.ctype == 'json':
+ # NOTE(vish): allow non-quoted replacements to survive json
+ data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
+ return jsonutils.loads(data)
+ else:
+ def to_dict(node):
+ ret = {}
+ if node.items():
+ ret.update(dict(node.items()))
+ if node.text:
+ ret['__content__'] = node.text
+ if node.tag:
+ ret['__tag__'] = node.tag
+ if node.nsmap:
+ ret['__nsmap__'] = node.nsmap
+ for element in node:
+ ret.setdefault(node.tag, [])
+ ret[node.tag].append(to_dict(element))
+ return ret
+ return to_dict(etree.fromstring(data))
+
+ @classmethod
+ def _get_sample_path(cls, name, dirname, suffix=''):
+ parts = [dirname]
+ parts.append('api_samples')
+ if cls.all_extensions:
+ parts.append('all_extensions')
+ if cls.extension_name:
+ alias = importutils.import_class(cls.extension_name).alias
+ parts.append(alias)
+ parts.append(name + "." + cls.ctype + suffix)
+ return os.path.join(*parts)
+
+ @classmethod
+ def _get_sample(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ dirname = os.path.normpath(os.path.join(dirname, "../../../../doc"))
+ return cls._get_sample_path(name, dirname)
+
+ @classmethod
+ def _get_template(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ return cls._get_sample_path(name, dirname, suffix='.tpl')
+
+ def _read_template(self, name):
+ template = self._get_template(name)
+ with open(template) as inf:
+ return inf.read().strip()
+
+ def _write_template(self, name, data):
+ with open(self._get_template(name), 'w') as outf:
+ outf.write(data)
+
+ def _write_sample(self, name, data):
+ with open(self._get_sample(name), 'w') as outf:
+ outf.write(data)
+
+ def _compare_result(self, subs, expected, result, result_str):
+ matched_value = None
+ if isinstance(expected, dict):
+ if not isinstance(result, dict):
+ raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
+ % {'result_str': result_str, 'result': result})
+ ex_keys = sorted(expected.keys())
+ res_keys = sorted(result.keys())
+ if ex_keys != res_keys:
+ ex_delta = []
+ res_delta = []
+ for key in ex_keys:
+ if key not in res_keys:
+ ex_delta.append(key)
+ for key in res_keys:
+ if key not in ex_keys:
+ res_delta.append(key)
+ raise NoMatch(
+ _('Dictionary key mismatch:\n'
+ 'Extra key(s) in template:\n%(ex_delta)s\n'
+ 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
+ {'ex_delta': ex_delta, 'result_str': result_str,
+ 'res_delta': res_delta})
+ for key in ex_keys:
+ res = self._compare_result(subs, expected[key], result[key],
+ result_str)
+ matched_value = res or matched_value
+ elif isinstance(expected, list):
+ if not isinstance(result, list):
+ raise NoMatch(
+ _('%(result_str)s: %(result)s is not a list.') %
+ {'result_str': result_str, 'result': result})
+
+ expected = expected[:]
+ extra = []
+ for res_obj in result:
+ for i, ex_obj in enumerate(expected):
+ try:
+ matched_value = self._compare_result(subs, ex_obj,
+ res_obj,
+ result_str)
+ del expected[i]
+ break
+ except NoMatch:
+ pass
+ else:
+ extra.append(res_obj)
+
+ error = []
+ if expected:
+ error.append(_('Extra list items in template:'))
+ error.extend([repr(o) for o in expected])
+
+ if extra:
+ error.append(_('Extra list items in %(result_str)s:') %
+ {'result_str': result_str})
+ error.extend([repr(o) for o in extra])
+
+ if error:
+ raise NoMatch('\n'.join(error))
+ elif isinstance(expected, six.string_types) and '%' in expected:
+ # NOTE(vish): escape stuff for regex
+ for char in '[]<>?':
+ expected = expected.replace(char, '\\%s' % char)
+ # NOTE(vish): special handling of subs that are not quoted. We are
+ # expecting an int but we had to pass in a string
+ # so the json would parse properly.
+ if expected.startswith("%(int:"):
+ result = str(result)
+ expected = expected.replace('int:', '')
+ expected = expected % subs
+ expected = '^%s$' % expected
+ match = re.match(expected, result)
+ if not match:
+ raise NoMatch(
+ _('Values do not match:\n'
+ 'Template: %(expected)s\n%(result_str)s: %(result)s') %
+ {'expected': expected, 'result_str': result_str,
+ 'result': result})
+ try:
+ matched_value = match.group('id')
+ except IndexError:
+ if match.groups():
+ matched_value = match.groups()[0]
+ else:
+ if isinstance(expected, six.string_types):
+ # NOTE(danms): Ignore whitespace in this comparison
+ expected = expected.strip()
+ if isinstance(result, six.string_types):
+ result = result.strip()
+ if expected != result:
+ raise NoMatch(
+ _('Values do not match:\n'
+ 'Template: %(expected)s\n%(result_str)s: '
+ '%(result)s') % {'expected': expected,
+ 'result_str': result_str,
+ 'result': result})
+ return matched_value
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ This may be needed by some tests to convert exact matches expected
+ from the server into pattern matches to verify what is in the
+ sample file.
+
+        If there are no changes to be made, subs is returned unchanged.
+ """
+ return subs
+
+ def _verify_response(self, name, subs, response, exp_code):
+ self.assertEqual(response.status_code, exp_code)
+ response_data = response.content
+ response_data = self._pretty_data(response_data)
+ if not os.path.exists(self._get_template(name)):
+ self._write_template(name, response_data)
+ template_data = response_data
+ else:
+ template_data = self._read_template(name)
+
+ if (self.generate_samples and
+ not os.path.exists(self._get_sample(name))):
+ self._write_sample(name, response_data)
+ sample_data = response_data
+ else:
+            with open(self._get_sample(name)) as sample:
+ sample_data = sample.read()
+
+ try:
+ template_data = self._objectify(template_data)
+ response_data = self._objectify(response_data)
+ response_result = self._compare_result(subs, template_data,
+ response_data, "Response")
+ # NOTE(danms): replace some of the subs with patterns for the
+ # doc/api_samples check, which won't have things like the
+ # correct compute host name. Also let the test do some of its
+ # own generalization, if necessary
+ vanilla_regexes = self._get_regexes()
+ subs['compute_host'] = vanilla_regexes['host_name']
+ subs['id'] = vanilla_regexes['id']
+ subs = self.generalize_subs(subs, vanilla_regexes)
+ sample_data = self._objectify(sample_data)
+ self._compare_result(subs, template_data, sample_data, "Sample")
+ return response_result
+ except NoMatch:
+ raise
+
+ def _get_host(self):
+ return 'http://openstack.example.com'
+
+ def _get_glance_host(self):
+ return 'http://glance.openstack.example.com'
+
+ def _get_regexes(self):
+ if self.ctype == 'json':
+ text = r'(\\"|[^"])*'
+ else:
+ text = r'[^<]*'
+        isotime_re = r'\d{4}-[01]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
+        strtime_re = r'\d{4}-[01]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
+        xmltime_re = (r'\d{4}-[01]\d-[0-3]\d '
+                      r'\d{2}:\d{2}:\d{2}'
+                      r'(\.\d{6})?(\+00:00)?')
+ return {
+ 'isotime': isotime_re,
+ 'strtime': strtime_re,
+ 'strtime_or_none': r'None|%s' % strtime_re,
+ 'xmltime': xmltime_re,
+ 'password': '[0-9a-zA-Z]{1,12}',
+            'ip': r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}',
+ 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
+ 'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12})',
+ 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}',
+ 'reservation_id': 'r-[0-9a-zA-Z]{8}',
+ 'private_key': '-----BEGIN RSA PRIVATE KEY-----'
+ '[a-zA-Z0-9\n/+=]*'
+ '-----END RSA PRIVATE KEY-----',
+ 'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
+ 'Generated-by-Nova',
+ 'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
+ 'host': self._get_host(),
+ 'host_name': '[0-9a-z]{32}',
+ 'glance_host': self._get_glance_host(),
+ 'compute_host': self.compute.host,
+ 'text': text,
+ 'int': '[0-9]+',
+ }
+
+ def _get_response(self, url, method, body=None, strip_version=False):
+ headers = {}
+ headers['Content-Type'] = 'application/' + self.ctype
+ headers['Accept'] = 'application/' + self.ctype
+ return self.api.api_request(url, body=body, method=method,
+ headers=headers, strip_version=strip_version)
+
+ def _do_get(self, url, strip_version=False):
+ return self._get_response(url, 'GET', strip_version=strip_version)
+
+ def _do_post(self, url, name, subs, method='POST'):
+ body = self._read_template(name) % subs
+ sample = self._get_sample(name)
+ if self.generate_samples and not os.path.exists(sample):
+ self._write_sample(name, body)
+ return self._get_response(url, method, body)
+
+ def _do_put(self, url, name, subs):
+ return self._do_post(url, name, subs, method='PUT')
+
+ def _do_delete(self, url):
+ return self._get_response(url, 'DELETE')
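One non-obvious detail in _objectify() and _compare_result() above is how placeholders that stand for unquoted numbers survive JSON parsing: they are temporarily wrapped in quotes with an "int:" marker, and the marker later tells the comparison to stringify the response value before the regex match. A condensed sketch of that round trip, using a made-up placeholder name:

    import json
    import re

    template = '{"size": %(num)s}'      # bare placeholder: not valid JSON yet
    subs = {'num': '[0-9]+'}
    response = '{"size": 100}'

    # _objectify(): quote the bare token, adding the "int:" marker, so that
    # json.loads() succeeds.
    quoted = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', template)
    expected = json.loads(quoted)['size']    # '%(int:num)s'
    result = json.loads(response)['size']    # 100

    # _compare_result(): the marker means "compare as a stringified number".
    assert expected.startswith('%(int:')
    pattern = '^%s$' % (expected.replace('int:', '') % subs)   # '^[0-9]+$'
    assert re.match(pattern, str(result))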
diff --git a/nova/tests/unit/integrated/integrated_helpers.py b/nova/tests/unit/integrated/integrated_helpers.py
new file mode 100644
index 0000000000..e62d84f642
--- /dev/null
+++ b/nova/tests/unit/integrated/integrated_helpers.py
@@ -0,0 +1,160 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides common functionality for integrated unit tests
+"""
+
+import random
+import string
+import uuid
+
+from oslo.config import cfg
+
+import nova.image.glance
+from nova.openstack.common import log as logging
+from nova import service
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_crypto
+import nova.tests.unit.image.fake
+from nova.tests.unit.integrated.api import client
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
+
+
+def generate_random_alphanumeric(length):
+ """Creates a random alphanumeric string of specified length."""
+ return ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _x in range(length))
+
+
+def generate_random_numeric(length):
+ """Creates a random numeric string of specified length."""
+ return ''.join(random.choice(string.digits)
+ for _x in range(length))
+
+
+def generate_new_element(items, prefix, numeric=False):
+ """Creates a random string with prefix, that is not in 'items' list."""
+ while True:
+ if numeric:
+ candidate = prefix + generate_random_numeric(8)
+ else:
+ candidate = prefix + generate_random_alphanumeric(8)
+ if candidate not in items:
+ return candidate
+ LOG.debug("Random collision on %s" % candidate)
+
+
+class _IntegratedTestBase(test.TestCase):
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(_IntegratedTestBase, self).setUp()
+
+ f = self._get_flags()
+ self.flags(**f)
+ self.flags(verbose=True)
+
+ self.useFixture(test.ReplaceModule('crypto', fake_crypto))
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.flags(scheduler_driver='nova.scheduler.'
+ 'chance.ChanceScheduler')
+ self._setup_services()
+ self._start_api_service()
+
+ self.api = self._get_test_client()
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ def _setup_services(self):
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.cert = self.start_service('cert')
+ self.consoleauth = self.start_service('consoleauth')
+ self.network = self.start_service('network')
+ self.scheduler = self.start_service('scheduler')
+ self.cells = self.start_service('cells', manager=CONF.cells.manager)
+
+ def tearDown(self):
+ self.osapi.stop()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+ super(_IntegratedTestBase, self).tearDown()
+
+ def _get_test_client(self):
+ return client.TestOpenStackClient('fake', 'fake', self.auth_url)
+
+ def _start_api_service(self):
+ self.osapi = service.WSGIService("osapi_compute")
+ self.osapi.start()
+ self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
+ 'host': self.osapi.host, 'port': self.osapi.port,
+ 'api_version': self._api_version})
+
+ def _get_flags(self):
+ """An opportunity to setup flags, before the services are started."""
+ f = {}
+
+ # Ensure tests only listen on localhost
+ f['ec2_listen'] = '127.0.0.1'
+ f['osapi_compute_listen'] = '127.0.0.1'
+ f['metadata_listen'] = '127.0.0.1'
+
+ # Auto-assign ports to allow concurrent tests
+ f['ec2_listen_port'] = 0
+ f['osapi_compute_listen_port'] = 0
+ f['metadata_listen_port'] = 0
+
+ f['fake_network'] = True
+ return f
+
+ def get_unused_server_name(self):
+ servers = self.api.get_servers()
+ server_names = [server['name'] for server in servers]
+ return generate_new_element(server_names, 'server')
+
+ def get_invalid_image(self):
+ return str(uuid.uuid4())
+
+ def _build_minimal_create_server_request(self):
+ server = {}
+
+ image = self.api.get_images()[0]
+ LOG.debug("Image: %s" % image)
+
+ if self._image_ref_parameter in image:
+ image_href = image[self._image_ref_parameter]
+ else:
+ image_href = image['id']
+ image_href = 'http://fake.server/%s' % image_href
+
+ # We now have a valid imageId
+ server[self._image_ref_parameter] = image_href
+
+ # Set a valid flavorId
+ flavor = self.api.get_flavors()[0]
+ LOG.debug("Using flavor: %s" % flavor)
+ server[self._flavor_ref_parameter] = ('http://fake.server/%s'
+ % flavor['id'])
+
+ # Set a valid server name
+ server_name = self.get_unused_server_name()
+ server['name'] = server_name
+ return server
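The "auto-assign ports" flags in _get_flags() above lean on standard socket behaviour: binding to port 0 asks the kernel for any free ephemeral port, which is what lets several test runs start their own API services side by side without fighting over a fixed port. A standalone illustration of that behaviour (not part of the diff):

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))       # port 0: the OS picks a free port
    print(sock.getsockname()[1])      # the port a service bound this way reports
    sock.close()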
diff --git a/nova/tests/unit/integrated/test_api_samples.py b/nova/tests/unit/integrated/test_api_samples.py
new file mode 100644
index 0000000000..676c7ee0e9
--- /dev/null
+++ b/nova/tests/unit/integrated/test_api_samples.py
@@ -0,0 +1,4433 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import copy
+import datetime
+import inspect
+import os
+import re
+import urllib
+import uuid as uuid_lib
+
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+
+from nova.api.metadata import password
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack.compute import extensions
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.cells import rpcapi as cells_rpcapi
+from nova.cells import state
+from nova.cloudpipe import pipelib
+from nova.compute import api as compute_api
+from nova.compute import cells_api as cells_api
+from nova.compute import manager as compute_manager
+from nova.compute import rpcapi as compute_rpcapi
+from nova.conductor import manager as conductor_manager
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova.openstack.common import log as logging
+import nova.quota
+from nova.servicegroup import api as service_group_api
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_fping
+from nova.tests.unit.api.openstack.compute.contrib import test_networks
+from nova.tests.unit.api.openstack.compute.contrib import test_services
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated import api_samples_test_base
+from nova.tests.unit.integrated import integrated_helpers
+from nova.tests.unit.objects import test_network
+from nova.tests.unit import utils as test_utils
+from nova import utils
+from nova.volume import cinder
+
+CONF = cfg.CONF
+CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+CONF.import_opt('enable_network_quota',
+ 'nova.api.openstack.compute.contrib.os_tenant_networks')
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
+CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
+LOG = logging.getLogger(__name__)
+
+
+class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
+ _api_version = 'v2'
+
+ def setUp(self):
+ extends = []
+ self.flags(use_ipv6=False,
+ osapi_compute_link_prefix=self._get_host(),
+ osapi_glance_link_prefix=self._get_glance_host())
+ if not self.all_extensions:
+ if hasattr(self, 'extends_name'):
+ extends = [self.extends_name]
+ ext = [self.extension_name] if self.extension_name else []
+ self.flags(osapi_compute_extension=ext + extends)
+ super(ApiSampleTestBaseV2, self).setUp()
+ self.useFixture(test.SampleNetworks(host=self.network.host))
+ fake_network.stub_compute_with_ips(self.stubs)
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+ self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
+
+
+class ApiSamplesTrap(ApiSampleTestBaseV2):
+ """Make sure extensions don't get added without tests."""
+
+ all_extensions = True
+
+ def _get_extensions_tested(self):
+ tests = []
+ for attr in globals().values():
+ if not inspect.isclass(attr):
+ continue # Skip non-class objects
+ if not issubclass(attr, integrated_helpers._IntegratedTestBase):
+ continue # Skip non-test classes
+ if attr.extension_name is None:
+ continue # Skip base tests
+ cls = importutils.import_class(attr.extension_name)
+ tests.append(cls.alias)
+ return tests
+
+ def _get_extensions(self):
+ extensions = []
+ response = self._do_get('extensions')
+ for extension in jsonutils.loads(response.content)['extensions']:
+ extensions.append(str(extension['alias']))
+ return extensions
+
+ def test_all_extensions_have_samples(self):
+ # NOTE(danms): This is a list of extensions which are currently
+ # in the tree but that don't (yet) have tests. This list should
+ # NOT be allowed to grow, and should shrink to zero (and be
+ # removed) soon.
+ do_not_approve_additions = []
+ do_not_approve_additions.append('os-create-server-ext')
+ do_not_approve_additions.append('os-baremetal-ext-status')
+ do_not_approve_additions.append('os-baremetal-nodes')
+
+ tests = self._get_extensions_tested()
+ extensions = self._get_extensions()
+ missing_tests = []
+ for extension in extensions:
+ # NOTE(danms): if you add tests, remove it from the
+ # exclusions list
+ self.assertFalse(extension in do_not_approve_additions and
+ extension in tests)
+
+ # NOTE(danms): if you add an extension, it must come with
+ # api_samples tests!
+ if (extension not in tests and
+ extension not in do_not_approve_additions):
+ missing_tests.append(extension)
+
+ if missing_tests:
+ LOG.error("Extensions are missing tests: %s" % missing_tests)
+ self.assertEqual(missing_tests, [])
+
+
+class VersionsSampleJsonTest(ApiSampleTestBaseV2):
+ def test_versions_get(self):
+ response = self._do_get('', strip_version=True)
+ subs = self._get_regexes()
+ self._verify_response('versions-get-resp', subs, response, 200)
+
+
+class VersionsSampleXmlTest(VersionsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ServersSampleBase(ApiSampleTestBaseV2):
+ def _post_server(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ }
+ response = self._do_post('servers', 'server-post-req', subs)
+ subs = self._get_regexes()
+ return self._verify_response('server-post-resp', subs, response, 202)
+
+
+class ServersSampleJsonTest(ServersSampleBase):
+ def test_servers_post(self):
+ return self._post_server()
+
+ def test_servers_get(self):
+ uuid = self.test_servers_post()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_servers_list(self):
+ uuid = self._post_server()
+ response = self._do_get('servers')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ self._verify_response('servers-list-resp', subs, response, 200)
+
+ def test_servers_details(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('servers-details-resp', subs, response, 200)
+
+
+class ServersSampleXmlTest(ServersSampleJsonTest):
+ ctype = 'xml'
+
+
+class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
+ all_extensions = True
+
+
+class ServersSampleAllExtensionXmlTest(ServersSampleXmlTest):
+ all_extensions = True
+
+
+class ServersSampleHideAddressesJsonTest(ServersSampleJsonTest):
+ extension_name = '.'.join(('nova.api.openstack.compute.contrib',
+ 'hide_server_addresses',
+ 'Hide_server_addresses'))
+
+
+class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
+ ctype = 'xml'
+
+
+class ServersSampleMultiStatusJsonTest(ServersSampleBase):
+ extension_name = '.'.join(('nova.api.openstack.compute.contrib',
+ 'server_list_multi_status',
+ 'Server_list_multi_status'))
+
+ def test_servers_list(self):
+ uuid = self._post_server()
+ response = self._do_get('servers?status=active&status=error')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ self._verify_response('servers-list-resp', subs, response, 200)
+
+
+class ServersSampleMultiStatusXMLTest(ServersSampleMultiStatusJsonTest):
+ ctype = 'xml'
+
+
+class ServersMetadataJsonTest(ServersSampleBase):
+ def _create_and_set(self, subs):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+ return uuid
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['value'] = '(Foo|Bar) Value'
+ return subs
+
+ def test_metadata_put_all(self):
+ # Test setting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ self._create_and_set(subs)
+
+ def test_metadata_post_all(self):
+ # Test updating all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_post('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_get_all(self):
+ # Test getting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata' % uuid)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_put(self):
+ # Test putting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_put('servers/%s/metadata/foo' % uuid,
+ 'server-metadata-req',
+ subs)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_get(self):
+ # Test getting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata/foo' % uuid)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_delete(self):
+ # Test deleting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_delete('servers/%s/metadata/foo' % uuid)
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, '')
+
+
+class ServersMetadataXmlTest(ServersMetadataJsonTest):
+ ctype = 'xml'
+
+
+class ServersIpsJsonTest(ServersSampleBase):
+ def test_get(self):
+ # Test getting a server's IP information.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-resp', subs, response, 200)
+
+ def test_get_by_network(self):
+ # Test getting a server's IP information by network id.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips/private' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-network-resp', subs, response, 200)
+
+
+class ServersIpsXmlTest(ServersIpsJsonTest):
+ ctype = 'xml'
+
+
+class ExtensionsSampleJsonTest(ApiSampleTestBaseV2):
+ all_extensions = True
+
+ def test_extensions_get(self):
+ response = self._do_get('extensions')
+ subs = self._get_regexes()
+ self._verify_response('extensions-get-resp', subs, response, 200)
+
+
+class ExtensionsSampleXmlTest(ExtensionsSampleJsonTest):
+ ctype = 'xml'
+
+
+class FlavorsSampleJsonTest(ApiSampleTestBaseV2):
+
+ def test_flavors_get(self):
+ response = self._do_get('flavors/1')
+ subs = self._get_regexes()
+ self._verify_response('flavor-get-resp', subs, response, 200)
+
+ def test_flavors_list(self):
+ response = self._do_get('flavors')
+ subs = self._get_regexes()
+ self._verify_response('flavors-list-resp', subs, response, 200)
+
+
+class FlavorsSampleXmlTest(FlavorsSampleJsonTest):
+ ctype = 'xml'
+
+
+class HostsSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.hosts.Hosts"
+
+ def test_host_startup(self):
+ response = self._do_get('os-hosts/%s/startup' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-startup', subs, response, 200)
+
+ def test_host_reboot(self):
+ response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-reboot', subs, response, 200)
+
+ def test_host_shutdown(self):
+ response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-shutdown', subs, response, 200)
+
+ def test_host_maintenance(self):
+ response = self._do_put('os-hosts/%s' % self.compute.host,
+ 'host-put-maintenance-req', {})
+ subs = self._get_regexes()
+ self._verify_response('host-put-maintenance-resp', subs, response, 200)
+
+ def test_host_get(self):
+ response = self._do_get('os-hosts/%s' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-resp', subs, response, 200)
+
+ def test_hosts_list(self):
+ response = self._do_get('os-hosts')
+ subs = self._get_regexes()
+ self._verify_response('hosts-list-resp', subs, response, 200)
+
+
+class HostsSampleXmlTest(HostsSampleJsonTest):
+ ctype = 'xml'
+
+
+class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
+ all_extensions = True
+
+
+class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
+ all_extensions = True
+
+
+class ImagesSampleJsonTest(ApiSampleTestBaseV2):
+ def test_images_list(self):
+ # Get api sample of images get list request.
+ response = self._do_get('images')
+ subs = self._get_regexes()
+ self._verify_response('images-list-get-resp', subs, response, 200)
+
+ def test_image_get(self):
+ # Get api sample of one single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_images_details(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('images-details-get-resp', subs, response, 200)
+
+ def test_image_metadata_get(self):
+ # Get api sample of an image metadata request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s/metadata' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-metadata-get-resp', subs, response, 200)
+
+ def test_image_metadata_post(self):
+ # Get api sample to update metadata of an image metadata request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_post(
+ 'images/%s/metadata' % image_id,
+ 'image-metadata-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-metadata-post-resp', subs, response, 200)
+
+ def test_image_metadata_put(self):
+ # Get api sample of image metadata put request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_put('images/%s/metadata' % image_id,
+ 'image-metadata-put-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-metadata-put-resp', subs, response, 200)
+
+ def test_image_meta_key_get(self):
+ # Get api sample of an image metadata key request.
+ image_id = fake.get_valid_image_id()
+ key = "kernel_id"
+ response = self._do_get('images/%s/metadata/%s' % (image_id, key))
+ subs = self._get_regexes()
+ self._verify_response('image-meta-key-get', subs, response, 200)
+
+ def test_image_meta_key_put(self):
+ # Get api sample of image metadata key put request.
+ image_id = fake.get_valid_image_id()
+ key = "auto_disk_config"
+ response = self._do_put('images/%s/metadata/%s' % (image_id, key),
+ 'image-meta-key-put-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-meta-key-put-resp', subs, response, 200)
+
+
+class ImagesSampleXmlTest(ImagesSampleJsonTest):
+ ctype = 'xml'
+
+
+class LimitsSampleJsonTest(ApiSampleTestBaseV2):
+ def test_limits_get(self):
+ response = self._do_get('limits')
+ subs = self._get_regexes()
+ self._verify_response('limit-get-resp', subs, response, 200)
+
+
+class LimitsSampleXmlTest(LimitsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ServersActionsJsonTest(ServersSampleBase):
+ def _test_server_action(self, uuid, action,
+ subs=None, resp_tpl=None, code=202):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-%s' % action.lower(),
+ subs)
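+ # With a response template the body is verified against it;
+ # otherwise the action must return an empty body with the
+ # expected status code.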
+ if resp_tpl:
+ subs.update(self._get_regexes())
+ self._verify_response(resp_tpl, subs, response, code)
+ else:
+ self.assertEqual(response.status_code, code)
+ self.assertEqual(response.content, "")
+
+ def test_server_password(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "changePassword",
+ {"password": "foo"})
+
+ def test_server_reboot_hard(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ {"type": "HARD"})
+
+ def test_server_reboot_soft(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ {"type": "SOFT"})
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ image = self.api.get_images()[0]['id']
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'ip': '1.2.3.4',
+ 'ip6': 'fe80::100',
+ 'hostid': '[a-f0-9]+',
+ }
+ self._test_server_action(uuid, 'rebuild', subs,
+ 'server-action-rebuild-resp')
+
+ def test_server_resize(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ self._test_server_action(uuid, "resize",
+ {"id": 2,
+ "host": self._get_host()})
+ return uuid
+
+ def test_server_revert_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "revertResize")
+
+ def test_server_confirm_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "confirmResize", code=204)
+
+ def test_server_create_image(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'createImage',
+ {'name': 'foo-image',
+ 'meta_var': 'myvar',
+ 'meta_val': 'foobar'})
+
+
+class ServersActionsXmlTest(ServersActionsJsonTest):
+ ctype = 'xml'
+
+
+class ServersActionsAllJsonTest(ServersActionsJsonTest):
+ all_extensions = True
+
+
+class ServersActionsAllXmlTest(ServersActionsXmlTest):
+ all_extensions = True
+
+
+class ServerStartStopJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib" + \
+ ".server_start_stop.Server_start_stop"
+
+ def _test_server_action(self, uuid, action):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server_start_stop',
+ {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_server_start(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop')
+ self._test_server_action(uuid, 'os-start')
+
+ def test_server_stop(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop')
+
+
+class ServerStartStopXmlTest(ServerStartStopJsonTest):
+ ctype = 'xml'
+
+
+class UserDataJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.user_data.User_data"
+
+ def test_user_data_post(self):
+ user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
+ user_data = base64.b64encode(user_data_contents)
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'user_data': user_data
+ }
+ response = self._do_post('servers', 'userdata-post-req', subs)
+
+ subs.update(self._get_regexes())
+ self._verify_response('userdata-post-resp', subs, response, 202)
+
+
+class UserDataXmlTest(UserDataJsonTest):
+ ctype = 'xml'
+
+
+class FlavorsExtraDataJsonTest(ApiSampleTestBaseV2):
+ extension_name = ('nova.api.openstack.compute.contrib.flavorextradata.'
+ 'Flavorextradata')
+
+ def _get_flags(self):
+ f = super(FlavorsExtraDataJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # Flavorextradata extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavors_extra_data_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavors-extra-data-get-resp',
+ subs, response, 200)
+
+ def test_flavors_extra_data_list(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavors-extra-data-list-resp',
+ subs, response, 200)
+
+ def test_flavors_extra_data_create(self):
+ subs = {
+ 'flavor_id': 666,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavors-extra-data-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('flavors-extra-data-post-resp',
+ subs, response, 200)
+
+
+class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
+ ctype = 'xml'
+
+
+class FlavorRxtxJsonTest(ApiSampleTestBaseV2):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
+ 'Flavor_rxtx')
+
+ def _get_flags(self):
+ f = super(FlavorRxtxJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorRxtx extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_rxtx_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-rxtx-get-resp', subs, response, 200)
+
+ def test_flavors_rxtx_list(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-rxtx-list-resp', subs, response, 200)
+
+ def test_flavors_rxtx_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-rxtx-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-rxtx-post-resp', subs, response, 200)
+
+
+class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
+ ctype = 'xml'
+
+
+class FlavorSwapJsonTest(ApiSampleTestBaseV2):
+ extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
+ 'Flavor_swap')
+
+ def _get_flags(self):
+ f = super(FlavorSwapJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorSwap extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def test_flavor_swap_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-swap-get-resp', subs, response, 200)
+
+ def test_flavor_swap_list(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-swap-list-resp', subs, response, 200)
+
+ def test_flavor_swap_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-swap-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-swap-post-resp', subs, response, 200)
+
+
+class FlavorSwapXmlTest(FlavorSwapJsonTest):
+ ctype = 'xml'
+
+
+class SecurityGroupsSampleJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib" + \
+ ".security_groups.Security_groups"
+
+ def _get_create_subs(self):
+ return {
+ 'group_name': 'test',
+ "description": "description",
+ }
+
+ def _create_security_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-security-groups',
+ 'security-group-post-req', subs)
+
+ def _add_group(self, uuid):
+ subs = {
+ 'group_name': 'test'
+ }
+ return self._do_post('servers/%s/action' % uuid,
+ 'security-group-add-post-req', subs)
+
+ def test_security_group_create(self):
+ response = self._create_security_group()
+ subs = self._get_create_subs()
+ self._verify_response('security-groups-create-resp', subs,
+ response, 200)
+
+ def test_security_groups_list(self):
+ # Get api sample of the security groups list request.
+ response = self._do_get('os-security-groups')
+ subs = self._get_regexes()
+ self._verify_response('security-groups-list-get-resp',
+ subs, response, 200)
+
+ def test_security_groups_get(self):
+ # Get api sample of a single security group get request.
+ security_group_id = '1'
+ response = self._do_get('os-security-groups/%s' % security_group_id)
+ subs = self._get_regexes()
+ self._verify_response('security-groups-get-resp', subs, response, 200)
+
+ def test_security_groups_list_server(self):
+ # Get api sample of security groups for a specific server.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-security-groups' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-security-groups-list-resp',
+ subs, response, 200)
+
+ def test_security_groups_add(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ response = self._add_group(uuid)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_security_groups_remove(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ self._add_group(uuid)
+ subs = {
+ 'group_name': 'test'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'security-group-remove-post-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class SecurityGroupsSampleXmlTest(SecurityGroupsSampleJsonTest):
+ ctype = 'xml'
+
+
+class SecurityGroupDefaultRulesSampleJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib'
+ '.security_group_default_rules'
+ '.Security_group_default_rules')
+
+ def test_security_group_default_rules_create(self):
+ response = self._do_post('os-security-group-default-rules',
+ 'security-group-default-rules-create-req',
+ {})
+ self._verify_response('security-group-default-rules-create-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_list(self):
+ self.test_security_group_default_rules_create()
+ response = self._do_get('os-security-group-default-rules')
+ self._verify_response('security-group-default-rules-list-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_show(self):
+ self.test_security_group_default_rules_create()
+ rule_id = '1'
+ response = self._do_get('os-security-group-default-rules/%s' % rule_id)
+ self._verify_response('security-group-default-rules-show-resp',
+ {}, response, 200)
+
+
+class SecurityGroupDefaultRulesSampleXmlTest(
+ SecurityGroupDefaultRulesSampleJsonTest):
+ ctype = 'xml'
+
+
+class SchedulerHintsJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.scheduler_hints."
+ "Scheduler_hints")
+
+ def test_scheduler_hints_post(self):
+ # Get api sample of scheduler hint post request.
+ hints = {'image_id': fake.get_valid_image_id(),
+ 'image_near': str(uuid_lib.uuid4())
+ }
+ response = self._do_post('servers', 'scheduler-hints-post-req',
+ hints)
+ subs = self._get_regexes()
+ self._verify_response('scheduler-hints-post-resp', subs, response, 202)
+
+
+class SchedulerHintsXmlTest(SchedulerHintsJsonTest):
+ ctype = 'xml'
+
+
+class ConsoleOutputSampleJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib" + \
+ ".console_output.Console_output"
+
+ def test_get_console_output(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'console-output-post-req',
+ {'action': 'os-getConsoleOutput'})
+ subs = self._get_regexes()
+ self._verify_response('console-output-post-resp', subs, response, 200)
+
+
+class ConsoleOutputSampleXmlTest(ConsoleOutputSampleJsonTest):
+ ctype = 'xml'
+
+
+class ExtendedServerAttributesJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib" + \
+ ".extended_server_attributes" + \
+ ".Extended_server_attributes"
+
+ def test_show(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedServerAttributesXmlTest(ExtendedServerAttributesJsonTest):
+ ctype = 'xml'
+
+
+class FloatingIpsJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib." \
+ "floating_ips.Floating_ips"
+
+ def setUp(self):
+ super(FloatingIpsJsonTest, self).setUp()
+ pool = CONF.default_floating_pool
+ interface = CONF.public_interface
+
+ self.ip_pool = [
+ {
+ 'address': "10.10.10.1",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.2",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.3",
+ 'pool': pool,
+ 'interface': interface
+ },
+ ]
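+ # Pre-populate the floating IP pool so allocations in the
+ # list/create tests have addresses to draw from.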
+ self.compute.db.floating_ip_bulk_create(
+ context.get_admin_context(), self.ip_pool)
+
+ def tearDown(self):
+ self.compute.db.floating_ip_bulk_destroy(
+ context.get_admin_context(), self.ip_pool)
+ super(FloatingIpsJsonTest, self).tearDown()
+
+ def test_floating_ips_list_empty(self):
+ response = self._do_get('os-floating-ips')
+
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-list-empty-resp',
+ subs, response, 200)
+
+ def test_floating_ips_list(self):
+ self._do_post('os-floating-ips',
+ 'floating-ips-create-nopool-req',
+ {})
+ self._do_post('os-floating-ips',
+ 'floating-ips-create-nopool-req',
+ {})
+
+ response = self._do_get('os-floating-ips')
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-list-resp',
+ subs, response, 200)
+
+ def test_floating_ips_create_nopool(self):
+ response = self._do_post('os-floating-ips',
+ 'floating-ips-create-nopool-req',
+ {})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-create-resp',
+ subs, response, 200)
+
+ def test_floating_ips_create(self):
+ response = self._do_post('os-floating-ips',
+ 'floating-ips-create-req',
+ {"pool": CONF.default_floating_pool})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-create-resp', subs, response, 200)
+
+ def test_floating_ips_get(self):
+ self.test_floating_ips_create()
+ # NOTE(sdague): the first floating ip will always have 1 as an id,
+ # but it would be better if we could get this from the create
+ response = self._do_get('os-floating-ips/%d' % 1)
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-create-resp', subs, response, 200)
+
+ def test_floating_ips_delete(self):
+ self.test_floating_ips_create()
+ response = self._do_delete('os-floating-ips/%d' % 1)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class ExtendedFloatingIpsJsonTest(FloatingIpsJsonTest):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "floating_ips.Floating_ips")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_floating_ips.Extended_floating_ips")
+
+
+class FloatingIpsXmlTest(FloatingIpsJsonTest):
+ ctype = 'xml'
+
+
+class ExtendedFloatingIpsXmlTest(ExtendedFloatingIpsJsonTest):
+ ctype = 'xml'
+
+
+class FloatingIpsBulkJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib." \
+ "floating_ips_bulk.Floating_ips_bulk"
+
+ def setUp(self):
+ super(FloatingIpsBulkJsonTest, self).setUp()
+ pool = CONF.default_floating_pool
+ interface = CONF.public_interface
+
+ self.ip_pool = [
+ {
+ 'address': "10.10.10.1",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.2",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.3",
+ 'pool': pool,
+ 'interface': interface,
+ 'host': "testHost"
+ },
+ ]
+ self.compute.db.floating_ip_bulk_create(
+ context.get_admin_context(), self.ip_pool)
+
+ def tearDown(self):
+ self.compute.db.floating_ip_bulk_destroy(
+ context.get_admin_context(), self.ip_pool)
+ super(FloatingIpsBulkJsonTest, self).tearDown()
+
+ def test_floating_ips_bulk_list(self):
+ response = self._do_get('os-floating-ips-bulk')
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-list-resp',
+ subs, response, 200)
+
+ def test_floating_ips_bulk_list_by_host(self):
+ response = self._do_get('os-floating-ips-bulk/testHost')
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-list-by-host-resp',
+ subs, response, 200)
+
+ def test_floating_ips_bulk_create(self):
+ response = self._do_post('os-floating-ips-bulk',
+ 'floating-ips-bulk-create-req',
+ {"ip_range": "192.168.1.0/24",
+ "pool": CONF.default_floating_pool,
+ "interface": CONF.public_interface})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-create-resp', subs,
+ response, 200)
+
+ def test_floating_ips_bulk_delete(self):
+ response = self._do_put('os-floating-ips-bulk/delete',
+ 'floating-ips-bulk-delete-req',
+ {"ip_range": "192.168.1.0/24"})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-delete-resp', subs,
+ response, 200)
+
+
+class FloatingIpsBulkXmlTest(FloatingIpsBulkJsonTest):
+ ctype = 'xml'
+
+
+class KeyPairsSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"
+
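+ # Replace the randomly generated keypair name with a generic
+ # pattern when subs are generalized (e.g. while regenerating
+ # the sample files).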
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = 'keypair-[0-9a-f-]+'
+ return subs
+
+ def test_keypairs_post(self, public_key=None):
+ """Get api sample of key pairs post request."""
+ key_name = 'keypair-' + str(uuid_lib.uuid4())
+ response = self._do_post('os-keypairs', 'keypairs-post-req',
+ {'keypair_name': key_name})
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-post-resp', subs, response, 200)
+ # NOTE(maurosr): returning the key_name is necessary because the
+ # verification returns the label of the last piece of information
+ # compared in the response, not necessarily the key name.
+ return key_name
+
+ def test_keypairs_import_key_post(self):
+ # Get api sample of key pairs post to import user's key.
+ key_name = 'keypair-' + str(uuid_lib.uuid4())
+ subs = {
+ 'keypair_name': key_name,
+ 'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
+ "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
+ "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
+ "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
+ "pSxsIbECHw== Generated-by-Nova"
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ subs)
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-import-post-resp', subs, response, 200)
+
+ def test_keypairs_list(self):
+ # Get api sample of key pairs list request.
+ key_name = self.test_keypairs_post()
+ response = self._do_get('os-keypairs')
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-list-resp', subs, response, 200)
+
+ def test_keypairs_get(self):
+ # Get api sample of key pairs get request.
+ key_name = self.test_keypairs_post()
+ response = self._do_get('os-keypairs/%s' % key_name)
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-get-resp', subs, response, 200)
+
+
+class KeyPairsSampleXmlTest(KeyPairsSampleJsonTest):
+ ctype = 'xml'
+
+
+class RescueJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".rescue.Rescue")
+
+ def _rescue(self, uuid):
+ req_subs = {
+ 'password': 'MySecretPass'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ def _unrescue(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-unrescue-req', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_server_rescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+ def test_server_unrescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+ self._unrescue(uuid)
+
+ # Do a server get to make sure that the 'ACTIVE' state is back
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'ACTIVE'
+
+ self._verify_response('server-get-resp-unrescue', subs, response, 200)
+
+
+class RescueXmlTest(RescueJsonTest):
+ ctype = 'xml'
+
+
+class ExtendedRescueWithImageJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_rescue_with_image.Extended_rescue_with_image")
+
+ def _get_flags(self):
+ f = super(ExtendedRescueWithImageJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # ExtendedRescueWithImage extension also needs Rescue to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.rescue.Rescue')
+ return f
+
+ def _rescue(self, uuid):
+ req_subs = {
+ 'password': 'MySecretPass',
+ 'rescue_image_ref': fake.get_valid_image_id()
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ def test_server_rescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+
+class ExtendedRescueWithImageXmlTest(ExtendedRescueWithImageJsonTest):
+ ctype = 'xml'
+
+
+class ShelveJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib.shelve.Shelve"
+
+ def setUp(self):
+ super(ShelveJsonTest, self).setUp()
+ # Don't offload instance, so we can test the offload call.
+ CONF.set_override('shelved_offload_time', -1)
+
+ def _test_server_action(self, uuid, template, action):
+ response = self._do_post('servers/%s/action' % uuid,
+ template, {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_shelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+
+ def test_shelve_offload(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
+
+ def test_unshelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-unshelve', 'unshelve')
+
+
+class ShelveXmlTest(ShelveJsonTest):
+ ctype = 'xml'
+
+
+class VirtualInterfacesJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".virtual_interfaces.Virtual_interfaces")
+
+ def test_vifs_list(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
+
+ subs = self._get_regexes()
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+
+ self._verify_response('vifs-list-resp', subs, response, 200)
+
+
+class VirtualInterfacesXmlTest(VirtualInterfacesJsonTest):
+ ctype = 'xml'
+
+
+class CloudPipeSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe"
+
+ def setUp(self):
+ super(CloudPipeSampleJsonTest, self).setUp()
+
+ def get_user_data(self, project_id):
+ """Stub method to generate user data for cloudpipe tests."""
+ return "VVNFUiBEQVRB\n"
+
+ def network_api_get(self, context, network_uuid):
+ """Stub to get a valid network and its information."""
+ return {'vpn_public_address': '127.0.0.1',
+ 'vpn_public_port': 22}
+
+ self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
+ self.stubs.Set(network_api.API, "get",
+ network_api_get)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
+ return subs
+
+ def test_cloud_pipe_create(self):
+ # Get api samples of cloud pipe extension creation.
+ self.flags(vpn_image_id=fake.get_valid_image_id())
+ project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
+ response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
+ project)
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-create-resp', subs, response, 200)
+ return project
+
+ def test_cloud_pipe_list(self):
+ # Get api samples of cloud pipe extension get request.
+ project = self.test_cloud_pipe_create()
+ response = self._do_get('os-cloudpipe')
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-get-resp', subs, response, 200)
+
+
+class CloudPipeSampleXmlTest(CloudPipeSampleJsonTest):
+ ctype = "xml"
+
+
+class CloudPipeUpdateJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".cloudpipe_update.Cloudpipe_update")
+
+ def _get_flags(self):
+ f = super(CloudPipeUpdateJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # Cloudpipe_update also needs cloudpipe to be loaded
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
+ return f
+
+ def test_cloud_pipe_update(self):
+ subs = {'vpn_ip': '192.168.1.1',
+ 'vpn_port': 2000}
+ response = self._do_put('os-cloudpipe/configure-project',
+ 'cloud-pipe-update-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
+ ctype = "xml"
+
+
+class AgentsJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.agents.Agents"
+
+ def _get_flags(self):
+ f = super(AgentsJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ return f
+
+ def setUp(self):
+ super(AgentsJsonTest, self).setUp()
+
+ fake_agents_list = [{'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'id': 1}]
+
+ def fake_agent_build_create(context, values):
+ values['id'] = 1
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ return agent_build_ref
+
+ def fake_agent_build_get_all(context, hypervisor):
+ agent_build_all = []
+ for agent in fake_agents_list:
+ if hypervisor and hypervisor != agent['hypervisor']:
+ continue
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(agent)
+ agent_build_all.append(agent_build_ref)
+ return agent_build_all
+
+ def fake_agent_build_update(context, agent_build_id, values):
+ pass
+
+ def fake_agent_build_destroy(context, agent_update_id):
+ pass
+
+ self.stubs.Set(db, "agent_build_create",
+ fake_agent_build_create)
+ self.stubs.Set(db, "agent_build_get_all",
+ fake_agent_build_get_all)
+ self.stubs.Set(db, "agent_build_update",
+ fake_agent_build_update)
+ self.stubs.Set(db, "agent_build_destroy",
+ fake_agent_build_destroy)
+
+ def test_agent_create(self):
+ # Creates a new agent build.
+ project = {'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'
+ }
+ response = self._do_post('os-agents', 'agent-post-req',
+ project)
+ project['agent_id'] = 1
+ self._verify_response('agent-post-resp', project, response, 200)
+ return project
+
+ def test_agent_list(self):
+ # Return a list of all agent builds.
+ response = self._do_get('os-agents')
+ project = {'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'agent_id': 1
+ }
+ self._verify_response('agents-get-resp', project, response, 200)
+
+ def test_agent_update(self):
+ # Update an existing agent build.
+ agent_id = 1
+ subs = {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}
+ response = self._do_put('os-agents/%s' % agent_id,
+ 'agent-update-put-req', subs)
+ subs['agent_id'] = 1
+ self._verify_response('agent-update-put-resp', subs, response, 200)
+
+ def test_agent_delete(self):
+ # Deletes an existing agent build.
+ agent_id = 1
+ response = self._do_delete('os-agents/%s' % agent_id)
+ self.assertEqual(response.status_code, 200)
+
+
+class AgentsXmlTest(AgentsJsonTest):
+ ctype = "xml"
+
+
+class FixedIpJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"
+
+ def _get_flags(self):
+ f = super(FixedIpJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ return f
+
+ def setUp(self):
+ super(FixedIpJsonTest, self).setUp()
+
+ instance = dict(test_utils.get_test_instance(),
+ hostname='openstack', host='host')
+ fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ ]
+
+ def fake_fixed_ip_get_by_address(context, address,
+ columns_to_join=None):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ host = {'host': "host",
+ 'hostname': 'openstack'}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, network, host)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ def test_fixed_ip_reserve(self):
+ # Reserve a Fixed IP.
+ project = {'reserve': None}
+ response = self._do_post('os-fixed-ips/192.168.1.1/action',
+ 'fixedip-post-req',
+ project)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_get_fixed_ip(self):
+ # Return data about the given fixed ip.
+ response = self._do_get('os-fixed-ips/192.168.1.1')
+ project = {'cidr': '192.168.1.0/24',
+ 'hostname': 'openstack',
+ 'host': 'host',
+ 'address': '192.168.1.1'}
+ self._verify_response('fixedips-get-resp', project, response, 200)
+
+
+class FixedIpXmlTest(FixedIpJsonTest):
+ ctype = "xml"
+
+
+class AggregatesSampleJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib" + \
+ ".aggregates.Aggregates"
+ create_subs = {
+ "aggregate_id": '(?P<id>\d+)'
+ }
+
+ def _create_aggregate(self):
+ return self._do_post('os-aggregates', 'aggregate-post-req',
+ self.create_subs)
+
+ def test_aggregate_create(self):
+ response = self._create_aggregate()
+ subs = self.create_subs
+ subs.update(self._get_regexes())
+ return self._verify_response('aggregate-post-resp',
+ subs, response, 200)
+
+ def test_list_aggregates(self):
+ self._create_aggregate()
+ response = self._do_get('os-aggregates')
+ subs = self._get_regexes()
+ self._verify_response('aggregates-list-get-resp', subs, response, 200)
+
+ def test_aggregate_get(self):
+ self._create_aggregate()
+ response = self._do_get('os-aggregates/%s' % 1)
+ subs = self._get_regexes()
+ self._verify_response('aggregates-get-resp', subs, response, 200)
+
+ def test_add_metadata(self):
+ self._create_aggregate()
+ response = self._do_post('os-aggregates/%s/action' % 1,
+ 'aggregate-metadata-post-req',
+ {'action': 'set_metadata'})
+ subs = self._get_regexes()
+ self._verify_response('aggregates-metadata-post-resp', subs,
+ response, 200)
+
+ def test_add_host(self):
+ self._create_aggregate()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/%s/action' % 1,
+ 'aggregate-add-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-add-host-post-resp', subs,
+ response, 200)
+
+ def test_remove_host(self):
+ self.test_add_host()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/1/action',
+ 'aggregate-remove-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-remove-host-post-resp',
+ subs, response, 200)
+
+ def test_update_aggregate(self):
+ self._create_aggregate()
+ response = self._do_put('os-aggregates/%s' % 1,
+ 'aggregate-update-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('aggregate-update-post-resp',
+ subs, response, 200)
+
+
+class AggregatesSampleXmlTest(AggregatesSampleJsonTest):
+ ctype = 'xml'
+
+
+class CertificatesSamplesJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.certificates."
+ "Certificates")
+
+ def test_create_certificates(self):
+ response = self._do_post('os-certificates',
+ 'certificate-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('certificate-create-resp', subs, response, 200)
+
+ def test_get_root_certificate(self):
+ response = self._do_get('os-certificates/root')
+ subs = self._get_regexes()
+ self._verify_response('certificate-get-root-resp', subs, response, 200)
+
+
+class CertificatesSamplesXmlTest(CertificatesSamplesJsonTest):
+ ctype = 'xml'
+
+
+class UsedLimitsSamplesJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+
+ def test_get_used_limits(self):
+ # Get api sample of used limits.
+ response = self._do_get('limits')
+ subs = self._get_regexes()
+ self._verify_response('usedlimits-get-resp', subs, response, 200)
+
+
+class UsedLimitsSamplesXmlTest(UsedLimitsSamplesJsonTest):
+ ctype = "xml"
+
+
+class UsedLimitsForAdminSamplesJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+ extension_name = (
+ "nova.api.openstack.compute.contrib.used_limits_for_admin."
+ "Used_limits_for_admin")
+
+ def test_get_used_limits_for_admin(self):
+ tenant_id = 'openstack'
+ response = self._do_get('limits?tenant_id=%s' % tenant_id)
+ subs = self._get_regexes()
+ return self._verify_response('usedlimitsforadmin-get-resp', subs,
+ response, 200)
+
+
+class UsedLimitsForAdminSamplesXmlTest(UsedLimitsForAdminSamplesJsonTest):
+ ctype = "xml"
+
+
+class MultipleCreateJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.multiple_create."
+ "Multiple_create")
+
+ def test_multiple_create(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'min_count': "2",
+ 'max_count': "3"
+ }
+ response = self._do_post('servers', 'multiple-create-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('multiple-create-post-resp', subs, response, 202)
+
+ def test_multiple_create_without_reservation_id(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'min_count': "2",
+ 'max_count': "3"
+ }
+ response = self._do_post('servers', 'multiple-create-no-resv-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('multiple-create-no-resv-post-resp', subs,
+ response, 202)
+
+
+class MultipleCreateXmlTest(MultipleCreateJsonTest):
+ ctype = 'xml'
+
+
+class ServicesJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.services.Services"
+
+ def setUp(self):
+ super(ServicesJsonTest, self).setUp()
+ self.stubs.Set(db, "service_get_all",
+ test_services.fake_db_api_service_get_all)
+ self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", test_services.fake_utcnow_ts)
+ self.stubs.Set(db, "service_get_by_args",
+ test_services.fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update",
+ test_services.fake_service_update)
+
+ def tearDown(self):
+ super(ServicesJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
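+ # Stubbed into ExtensionManager.is_loaded below so that only the
+ # os-extended-services extension reports as loaded.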
+ def fake_load(self, service_name):
+ return service_name == 'os-extended-services'
+
+ def test_services_list(self):
+ """Return a list of all agent builds."""
+ response = self._do_get('os-services')
+ subs = {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ self._verify_response('services-list-get-resp', subs, response, 200)
+
+ def test_service_enable(self):
+ """Enable an existing agent build."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/enable',
+ 'service-enable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-enable-put-resp', subs, response, 200)
+
+ def test_service_disable(self):
+ """Disable an existing agent build."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/disable',
+ 'service-disable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-disable-put-resp', subs, response, 200)
+
+ def test_service_detail(self):
+ """Return a list of all running services with the disable reason
+ information if that exists.
+ """
+ self.stubs.Set(extensions.ExtensionManager, "is_loaded",
+ self.fake_load)
+ response = self._do_get('os-services')
+ self.assertEqual(response.status_code, 200)
+ subs = {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ self._verify_response('services-get-resp',
+ subs, response, 200)
+
+ def test_service_disable_log_reason(self):
+ """Disable an existing service and log the reason."""
+ self.stubs.Set(extensions.ExtensionManager, "is_loaded",
+ self.fake_load)
+ subs = {"host": "host1",
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test2'}
+ response = self._do_put('os-services/disable-log-reason',
+ 'service-disable-log-put-req', subs)
+ return self._verify_response('service-disable-log-put-resp',
+ subs, response, 200)
+
+
+class ServicesXmlTest(ServicesJsonTest):
+ ctype = 'xml'
+
+
+class ExtendedServicesJsonTest(ApiSampleTestBaseV2):
+ """This extension is extending the functionalities of the
+ Services extension so the funcionalities introduced by this extension
+ are tested in the ServicesJsonTest and ServicesXmlTest classes.
+ """
+
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_services.Extended_services")
+
+
+class ExtendedServicesXmlTest(ExtendedServicesJsonTest):
+ """This extension is tested in the ServicesXmlTest class."""
+ ctype = 'xml'
+
+
+@mock.patch.object(db, 'service_get_all',
+ side_effect=test_services.fake_db_api_service_get_all)
+@mock.patch.object(db, 'service_get_by_args',
+ side_effect=test_services.fake_service_get_by_host_binary)
+class ExtendedServicesDeleteJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib.services.Services")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_services_delete.Extended_services_delete")
+
+ def setUp(self):
+ super(ExtendedServicesDeleteJsonTest, self).setUp()
+ timeutils.set_time_override(test_services.fake_utcnow())
+
+ def tearDown(self):
+ super(ExtendedServicesDeleteJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_service_detail(self, *mocks):
+ """Return a list of all running services with the disable reason
+ information if that exists.
+ """
+ response = self._do_get('os-services')
+ self.assertEqual(response.status_code, 200)
+ subs = {'id': 1,
+ 'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ return self._verify_response('services-get-resp',
+ subs, response, 200)
+
+ def test_service_delete(self, *mocks):
+ response = self._do_delete('os-services/1')
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, "")
+
+
+class ExtendedServicesDeleteXmlTest(ExtendedServicesDeleteJsonTest):
+ """This extension is tested in the ExtendedServicesDeleteJsonTest class."""
+ ctype = 'xml'
+
+
+class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
+ "Simple_tenant_usage")
+
+ def setUp(self):
+ """setUp method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).setUp()
+
+ started = timeutils.utcnow()
+ now = started + datetime.timedelta(hours=1)
+
+ timeutils.set_time_override(started)
+ self._post_server()
+ timeutils.set_time_override(now)
+
+ self.query = {
+ 'start': str(started),
+ 'end': str(now)
+ }
+
+ def tearDown(self):
+ """tearDown method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_get_tenants_usage(self):
+ # Get api sample of the all tenants usage request.
+ response = self._do_get('os-simple-tenant-usage?%s' % (
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get', subs, response, 200)
+
+ def test_get_tenant_usage_details(self):
+ # Get api sample of a specific tenant usage request.
+ tenant_id = 'openstack'
+ response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get-specific', subs,
+ response, 200)
+
+
+class SimpleTenantUsageSampleXmlTest(SimpleTenantUsageSampleJsonTest):
+ ctype = "xml"
+
+
+class ServerDiagnosticsSamplesJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.server_diagnostics."
+ "Server_diagnostics")
+
+ def test_server_diagnostics_get(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/diagnostics' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-diagnostics-get-resp', subs,
+ response, 200)
+
+
+class ServerDiagnosticsSamplesXmlTest(ServerDiagnosticsSamplesJsonTest):
+ ctype = "xml"
+
+
+class AvailabilityZoneJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
+ "Availability_zone")
+
+ def test_create_availability_zone(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ "availability_zone": "nova"
+ }
+ response = self._do_post('servers', 'availability-zone-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('availability-zone-post-resp', subs,
+ response, 202)
+
+
+class AvailabilityZoneXmlTest(AvailabilityZoneJsonTest):
+ ctype = "xml"
+
+
+class AdminActionsSamplesJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.admin_actions."
+ "Admin_actions")
+
+ def setUp(self):
+ """setUp Method for AdminActions api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(AdminActionsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_pause(self):
+ # Get api samples to pause server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-pause', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_unpause(self):
+ # Get api samples to unpause server request.
+ self.test_post_pause()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-unpause', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_suspend(self):
+ # Get api samples to suspend server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-suspend', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_resume(self):
+ # Get api samples to resume server request.
+ self.test_post_suspend()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-resume', {})
+ self.assertEqual(response.status_code, 202)
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
+ def test_post_migrate(self, mock_cold_migrate):
+ # Get api samples to migrate server request.
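+ # The conductor's cold-migrate task is mocked out above, so the
+ # request returns 202 without performing a real migration.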
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-migrate', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_reset_network(self):
+ # Get api samples to reset server network request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-network', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_inject_network_info(self):
+ # Get api samples to inject network info request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-inject-network-info', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_lock_server(self):
+ # Get api samples to lock server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-lock-server', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_unlock_server(self):
+ # Get api samples to unlock server request.
+ self.test_post_lock_server()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-unlock-server', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_backup_server(self):
+ # Get api samples to backup server request.
+ def image_details(self, context, **kwargs):
+ """This stub is specifically used on the backup action."""
+ # NOTE(maurosr): I've added this simple stub cause backup action
+ # was trapped in infinite loop during fetch image phase since the
+ # fake Image Service always returns the same set of images
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', image_details)
+
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-backup-server', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_live_migrate_server(self):
+ # Get api samples to live migrate server request.
+ def fake_live_migrate(_self, context, instance, scheduler_hint,
+ block_migration, disk_over_commit):
+ self.assertEqual(self.uuid, instance["uuid"])
+ host = scheduler_hint["host"]
+ self.assertEqual(self.compute.host, host)
+
+ self.stubs.Set(conductor_manager.ComputeTaskManager,
+ '_live_migrate',
+ fake_live_migrate)
+
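+ # Return a minimal compute service record so the target host
+ # passes the live-migration checks.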
+ def fake_get_compute(context, host):
+ service = dict(host=host,
+ binary='nova-compute',
+ topic='compute',
+ report_count=1,
+ updated_at='foo',
+ hypervisor_type='bar',
+ hypervisor_version=
+ utils.convert_version_to_int('1.0'),
+ disabled=False)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
+
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-live-migrate',
+ {'hostname': self.compute.host})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_reset_state(self):
+ # Get api samples to reset server state request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-server-state', {})
+ self.assertEqual(response.status_code, 202)
+
+
+class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
+ ctype = 'xml'
+
+
+class ConsolesSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".consoles.Consoles")
+
+ def setUp(self):
+ super(ConsolesSampleJsonTests, self).setUp()
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ self.flags(enabled=True, group='rdp')
+ self.flags(enabled=True, group='serial_console')
+
+ def test_get_vnc_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-vnc-console-post-resp', subs, response, 200)
+
+ def test_get_spice_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-spice-console-post-req',
+ {'action': 'os-getSPICEConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-spice-console-post-resp', subs,
+ response, 200)
+
+ def test_get_rdp_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-rdp-console-post-req',
+ {'action': 'os-getRDPConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-rdp-console-post-resp', subs,
+ response, 200)
+
+ def test_get_serial_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-serial-console-post-req',
+ {'action': 'os-getSerialConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-serial-console-post-resp', subs,
+ response, 200)
+
+
+class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
+ ctype = 'xml'
+
+
+class ConsoleAuthTokensSampleJsonTests(ServersSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib.consoles.Consoles")
+ extension_name = ("nova.api.openstack.compute.contrib.console_auth_tokens."
+ "Console_auth_tokens")
+
+ def _get_console_url(self, data):
+ return jsonutils.loads(data)["console"]["url"]
+
+ def _get_console_token(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-rdp-console-post-req',
+ {'action': 'os-getRDPConsole'})
+
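+ # Extract the auth token from the query string of the returned
+ # console URL.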
+ url = self._get_console_url(response.content)
+ return re.match('.+?token=([^&]+)', url).groups()[0]
+
+ def test_get_console_connect_info(self):
+ self.flags(enabled=True, group='rdp')
+
+ uuid = self._post_server()
+ token = self._get_console_token(uuid)
+
+ response = self._do_get('os-console-auth-tokens/%s' % token)
+
+ subs = self._get_regexes()
+ subs["uuid"] = uuid
+ subs["host"] = r"[\w\.\-]+"
+ subs["port"] = "[0-9]+"
+ subs["internal_access_path"] = ".*"
+ self._verify_response('get-console-connect-info-get-resp', subs,
+ response, 200)
+
+
+class ConsoleAuthTokensSampleXmlTests(ConsoleAuthTokensSampleJsonTests):
+ ctype = 'xml'
+
+ def _get_console_url(self, data):
+ return etree.fromstring(data).find('url').text
+
+
+class DeferredDeleteSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".deferred_delete.Deferred_delete")
+
+ def setUp(self):
+ super(DeferredDeleteSampleJsonTests, self).setUp()
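+ # A non-zero reclaim_instance_interval makes deletes deferred
+ # (soft) deletes, which the restore and forceDelete samples rely on.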
+ self.flags(reclaim_instance_interval=1)
+
+ def test_restore(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'restore-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_force_delete(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'force-delete-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
+ ctype = 'xml'
+
+
+class QuotasSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+ def test_show_quotas(self):
+ # Get api sample to show quotas.
+ response = self._do_get('os-quota-sets/fake_tenant')
+ self._verify_response('quotas-show-get-resp', {}, response, 200)
+
+ def test_show_quotas_defaults(self):
+ # Get api sample to show quotas defaults.
+ response = self._do_get('os-quota-sets/fake_tenant/defaults')
+ self._verify_response('quotas-show-defaults-get-resp',
+ {}, response, 200)
+
+ def test_update_quotas(self):
+ # Get api sample to update quotas.
+ response = self._do_put('os-quota-sets/fake_tenant',
+ 'quotas-update-post-req',
+ {})
+ self._verify_response('quotas-update-post-resp', {}, response, 200)
+
+
+class QuotasSampleXmlTests(QuotasSampleJsonTests):
+ ctype = "xml"
+
+
+class ExtendedQuotasSampleJsonTests(ApiSampleTestBaseV2):
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_quotas.Extended_quotas")
+
+ def test_delete_quotas(self):
+ # Get api sample to delete quota.
+ response = self._do_delete('os-quota-sets/fake_tenant')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_update_quotas(self):
+ # Get api sample to update quotas.
+ response = self._do_put('os-quota-sets/fake_tenant',
+ 'quotas-update-post-req',
+ {})
+ return self._verify_response('quotas-update-post-resp', {},
+ response, 200)
+
+
+class ExtendedQuotasSampleXmlTests(ExtendedQuotasSampleJsonTests):
+ ctype = "xml"
+
+
+class UserQuotasSampleJsonTests(ApiSampleTestBaseV2):
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".user_quotas.User_quotas")
+
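+ # Stubbed into ExtensionManager.is_loaded so every extension
+ # reports as loaded.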
+ def fake_load(self, *args):
+ return True
+
+ def test_show_quotas_for_user(self):
+ # Get api sample to show quotas for user.
+ response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
+ self._verify_response('user-quotas-show-get-resp', {}, response, 200)
+
+ def test_delete_quotas_for_user(self):
+ # Get api sample to delete quota for user.
+ self.stubs.Set(extensions.ExtensionManager, "is_loaded",
+ self.fake_load)
+ response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_update_quotas_for_user(self):
+ # Get api sample to update quotas for user.
+ response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
+ 'user-quotas-update-post-req',
+ {})
+ return self._verify_response('user-quotas-update-post-resp', {},
+ response, 200)
+
+
+class UserQuotasSampleXmlTests(UserQuotasSampleJsonTests):
+ ctype = "xml"
+
+
+class ExtendedIpsSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_ips.Extended_ips")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedIpsSampleXmlTests(ExtendedIpsSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedIpsMacSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_ips_mac.Extended_ips_mac")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ self.assertEqual(response.status_code, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ self.assertEqual(response.status_code, 200)
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedIpsMacSampleXmlTests(ExtendedIpsMacSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedStatusSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_status.Extended_status")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedStatusSampleXmlTests(ExtendedStatusSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedVolumesSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_volumes.Extended_volumes")
+
+ def test_show(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedVolumesSampleXmlTests(ExtendedVolumesSampleJsonTests):
+ ctype = 'xml'
+
+
+class ServerUsageSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".server_usage.Server_usage")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('servers-detail-resp', subs,
+ response, 200)
+
+
+class ServerUsageSampleXmlTests(ServerUsageSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedVIFNetSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_virtual_interfaces_net.Extended_virtual_interfaces_net")
+
+ def _get_flags(self):
+ f = super(ExtendedVIFNetSampleJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # the extended_virtual_interfaces_net extension also
+        # needs virtual_interfaces to be loaded
+ f['osapi_compute_extension'].append(
+ ('nova.api.openstack.compute.contrib'
+ '.virtual_interfaces.Virtual_interfaces'))
+ return f
+
+ def test_vifs_list(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
+ self.assertEqual(response.status_code, 200)
+
+ subs = self._get_regexes()
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+
+ self._verify_response('vifs-list-resp', subs, response, 200)
+
+
+class ExtendedVIFNetSampleXmlTests(ExtendedVIFNetSampleJsonTests):
+ ctype = 'xml'
+
+
+class FlavorManageSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavormanage."
+ "Flavormanage")
+
+ def _create_flavor(self):
+ """Create a flavor."""
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': "test_flavor"
+ }
+ response = self._do_post("flavors",
+ "flavor-create-post-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-create-post-resp", subs, response, 200)
+
+ def test_create_flavor(self):
+ # Get api sample to create a flavor.
+ self._create_flavor()
+
+ def test_delete_flavor(self):
+ # Get api sample to delete a flavor.
+ self._create_flavor()
+ response = self._do_delete("flavors/10")
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
+ ctype = "xml"
+
+
+class ServerPasswordSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.server_password."
+ "Server_password")
+
+ def test_get_password(self):
+
+        # Mock the password since there is no API to set it
+ def fake_ext_password(*args, **kwargs):
+ return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
+ "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
+ "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
+ "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
+ "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
+ "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
+ "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
+ self.stubs.Set(password, "extract_password", fake_ext_password)
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-server-password' % uuid)
+ subs = self._get_regexes()
+ subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
+ self._verify_response('get-password-resp', subs, response, 200)
+
+ def test_reset_password(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s/os-server-password' % uuid)
+ self.assertEqual(response.status_code, 204)
+
+
+class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
+ ctype = "xml"
+
+
+class DiskConfigJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.disk_config."
+ "Disk_config")
+
+ def test_list_servers_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('list-servers-detail-get', subs, response, 200)
+
+ def test_get_server(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-put-req', {})
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-update-put-resp', subs, response, 200)
+
+ def test_resize_server(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-resize-post-req', {})
+ self.assertEqual(response.status_code, 202)
+        # NOTE(tmello): Resize does not return a response body;
+        # see bug #1085213.
+ self.assertEqual(response.content, "")
+
+ def test_rebuild_server(self):
+ uuid = self._post_server()
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild-req', subs)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-action-rebuild-resp',
+ subs, response, 202)
+
+ def test_get_image(self):
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_list_images(self):
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('image-list-resp', subs, response, 200)
+
+
+class DiskConfigXmlTest(DiskConfigJsonTest):
+ ctype = 'xml'
+
+
+class OsNetworksJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
+ ".Os_tenant_networks")
+
+ def setUp(self):
+ super(OsNetworksJsonTests, self).setUp()
+ CONF.set_override("enable_network_quota", True)
+
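+        # No-op the quota reserve/commit/rollback paths so tenant network
+        # create and delete do not touch real quota reservations.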
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
+
+ def test_list_networks(self):
+ response = self._do_get('os-tenant-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-res', subs, response, 200)
+
+ def test_create_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ subs = self._get_regexes()
+ self._verify_response('networks-post-res', subs, response, 200)
+
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ net = jsonutils.loads(response.content)
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class OsNetworksXmlTests(OsNetworksJsonTests):
+ ctype = 'xml'
+
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ net = etree.fromstring(response.content)
+ network_id = net.find('id').text
+ response = self._do_delete('os-tenant-networks/%s' % network_id)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class NetworksJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".os_networks.Os_networks")
+
+ def setUp(self):
+ super(NetworksJsonTests, self).setUp()
+ fake_network_api = test_networks.FakeNetworkAPI()
+ self.stubs.Set(network_api.API, "get_all",
+ fake_network_api.get_all)
+ self.stubs.Set(network_api.API, "get",
+ fake_network_api.get)
+ self.stubs.Set(network_api.API, "associate",
+ fake_network_api.associate)
+ self.stubs.Set(network_api.API, "delete",
+ fake_network_api.delete)
+ self.stubs.Set(network_api.API, "create",
+ fake_network_api.create)
+ self.stubs.Set(network_api.API, "add_network_to_project",
+ fake_network_api.add_network_to_project)
+
+ def test_network_list(self):
+ response = self._do_get('os-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-resp', subs, response, 200)
+
+ def test_network_disassociate(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_post('os-networks/%s/action' % uuid,
+ 'networks-disassociate-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_network_show(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_get('os-networks/%s' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('network-show-resp', subs, response, 200)
+
+ def test_network_create(self):
+ response = self._do_post("os-networks",
+ 'network-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('network-create-resp', subs, response, 200)
+
+ def test_network_add(self):
+ response = self._do_post("os-networks/add",
+ 'network-add-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_network_delete(self):
+ response = self._do_delete('os-networks/always_delete')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class NetworksXmlTests(NetworksJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedNetworksJsonTests(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "os_networks.Os_networks")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_networks.Extended_networks")
+
+ def setUp(self):
+ super(ExtendedNetworksJsonTests, self).setUp()
+ fake_network_api = test_networks.FakeNetworkAPI()
+ self.stubs.Set(network_api.API, "get_all",
+ fake_network_api.get_all)
+ self.stubs.Set(network_api.API, "get",
+ fake_network_api.get)
+ self.stubs.Set(network_api.API, "associate",
+ fake_network_api.associate)
+ self.stubs.Set(network_api.API, "delete",
+ fake_network_api.delete)
+ self.stubs.Set(network_api.API, "create",
+ fake_network_api.create)
+ self.stubs.Set(network_api.API, "add_network_to_project",
+ fake_network_api.add_network_to_project)
+
+ def test_network_list(self):
+ response = self._do_get('os-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-resp', subs, response, 200)
+
+ def test_network_show(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_get('os-networks/%s' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('network-show-resp', subs, response, 200)
+
+ def test_network_create(self):
+ response = self._do_post("os-networks",
+ 'network-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('network-create-resp', subs, response, 200)
+
+
+class ExtendedNetworksXmlTests(ExtendedNetworksJsonTests):
+ ctype = 'xml'
+
+
+class NetworksAssociateJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".networks_associate.Networks_associate")
+
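+    # Default marker for the optional host/project arguments of the
+    # fake_associate stub set up below.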
+ _sentinel = object()
+
+ def _get_flags(self):
+ f = super(NetworksAssociateJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # Networks_associate requires the Networks extension to be loaded as well
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
+ return f
+
+ def setUp(self):
+ super(NetworksAssociateJsonTests, self).setUp()
+
+ def fake_associate(self, context, network_id,
+ host=NetworksAssociateJsonTests._sentinel,
+ project=NetworksAssociateJsonTests._sentinel):
+ return True
+
+ self.stubs.Set(network_api.API, "associate", fake_associate)
+
+ def test_disassociate(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_disassociate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-host-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_disassociate_project(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-project-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_associate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-associate-host-req',
+ {"host": "testHost"})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+
+class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
+ ctype = 'xml'
+
+
+class FlavorDisabledSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavor_disabled."
+ "Flavor_disabled")
+
+ def test_show_flavor(self):
+ # Get api sample to show flavor_disabled attr. of a flavor.
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = self._get_regexes()
+ subs['flavor_id'] = flavor_id
+ self._verify_response('flavor-show-get-resp', subs, response, 200)
+
+ def test_detail_flavor(self):
+ # Get api sample to show details of a flavor.
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-detail-get-resp', subs, response, 200)
+
+
+class FlavorDisabledSampleXmlTests(FlavorDisabledSampleJsonTests):
+ ctype = "xml"
+
+
+class QuotaClassesSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.quota_classes."
+ "Quota_classes")
+ set_id = 'test_class'
+
+ def test_show_quota_classes(self):
+ # Get api sample to show quota classes.
+ response = self._do_get('os-quota-class-sets/%s' % self.set_id)
+ subs = {'set_id': self.set_id}
+ self._verify_response('quota-classes-show-get-resp', subs,
+ response, 200)
+
+ def test_update_quota_classes(self):
+ # Get api sample to update quota classes.
+ response = self._do_put('os-quota-class-sets/%s' % self.set_id,
+ 'quota-classes-update-post-req',
+ {})
+ self._verify_response('quota-classes-update-post-resp',
+ {}, response, 200)
+
+
+class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
+ ctype = "xml"
+
+
+class CellsSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.cells.Cells"
+
+ def setUp(self):
+        # db_check_interval < 0 makes the cells manager always hit the DB
+ self.flags(enable=True, db_check_interval=-1, group='cells')
+ super(CellsSampleJsonTest, self).setUp()
+ self._stub_cells()
+
+ def _stub_cells(self, num_cells=5):
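+        # Build num_cells fake cell records (cell1..cellN, alternating
+        # parent/child) and stub the DB and cells RPC lookups to use them.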
+ self.cells = []
+ self.cells_next_id = 1
+
+ def _fake_cell_get_all(context):
+ return self.cells
+
+ def _fake_cell_get(inst, context, cell_name):
+ for cell in self.cells:
+ if cell['name'] == cell_name:
+ return cell
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ for x in xrange(num_cells):
+ cell = models.Cell()
+ our_id = self.cells_next_id
+ self.cells_next_id += 1
+ cell.update({'id': our_id,
+ 'name': 'cell%s' % our_id,
+ 'transport_url': 'rabbit://username%s@/' % our_id,
+ 'is_parent': our_id % 2 == 0})
+ self.cells.append(cell)
+
+ self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
+
+ def test_cells_empty_list(self):
+        # Override the cells stubbed in setUp with an empty list
+ self._stub_cells(num_cells=0)
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-empty-resp', subs, response, 200)
+
+ def test_cells_list(self):
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-resp', subs, response, 200)
+
+ def test_cells_get(self):
+ response = self._do_get('os-cells/cell3')
+ subs = self._get_regexes()
+ self._verify_response('cells-get-resp', subs, response, 200)
+
+
+class CellsSampleXmlTest(CellsSampleJsonTest):
+ ctype = 'xml'
+
+
+class CellsCapacitySampleJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib.cells.Cells")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "cell_capacities.Cell_capacities")
+
+ def setUp(self):
+ self.flags(enable=True, db_check_interval=-1, group='cells')
+ super(CellsCapacitySampleJsonTest, self).setUp()
+        # NOTE(navneetk/kaushikc): Mock the cell capacity to avoid it being
+        # calculated from the compute nodes in the environment
+ self._mock_cell_capacity()
+
+ def test_get_cell_capacity(self):
+ state_manager = state.CellStateManager()
+ my_state = state_manager.get_my_state()
+ response = self._do_get('os-cells/%s/capacities' %
+ my_state.name)
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def test_get_all_cells_capacity(self):
+ response = self._do_get('os-cells/capacities')
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def _mock_cell_capacity(self):
+ self.mox.StubOutWithMock(self.cells.manager.state_manager,
+ 'get_our_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.cells.manager.state_manager.get_our_capacities(). \
+ AndReturn(response)
+ self.mox.ReplayAll()
+
+
+class CellsCapacitySampleXmlTest(CellsCapacitySampleJsonTest):
+ ctype = 'xml'
+
+
+class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.'
+ 'block_device_mapping_v2_boot.'
+ 'Block_device_mapping_v2_boot')
+
+ def _get_flags(self):
+ f = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # We need the volumes extension as well
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.volumes.Volumes')
+ return f
+
+ def test_servers_post_with_bdm_v2(self):
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach',
+ fakes.stub_volume_check_attach)
+ return self._post_server()
+
+
+class BlockDeviceMappingV2BootXmlTest(BlockDeviceMappingV2BootJsonTest):
+ ctype = 'xml'
+
+
+class FloatingIPPoolsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_pools."
+ "Floating_ip_pools")
+
+ def test_list_floatingippools(self):
+ pool_list = ["pool1", "pool2"]
+
+ def fake_get_floating_ip_pools(self, context):
+ return pool_list
+
+ self.stubs.Set(network_api.API, "get_floating_ip_pools",
+ fake_get_floating_ip_pools)
+ response = self._do_get('os-floating-ip-pools')
+ subs = {
+ 'pool1': pool_list[0],
+ 'pool2': pool_list[1]
+ }
+ self._verify_response('floatingippools-list-resp', subs, response, 200)
+
+
+class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
+ ctype = 'xml'
+
+
+class MultinicSampleJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib.multinic.Multinic"
+
+ def _disable_instance_dns_manager(self):
+ # NOTE(markmc): it looks like multinic and instance_dns_manager are
+ # incompatible. See:
+ # https://bugs.launchpad.net/nova/+bug/1213251
+ self.flags(
+ instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')
+
+ def setUp(self):
+ self._disable_instance_dns_manager()
+ super(MultinicSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def _add_fixed_ip(self):
+ subs = {"networkId": 1}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-add-fixed-ip-req', subs)
+ self.assertEqual(response.status_code, 202)
+
+ def test_add_fixed_ip(self):
+ self._add_fixed_ip()
+
+ def test_remove_fixed_ip(self):
+ self._add_fixed_ip()
+
+ subs = {"ip": "10.0.0.4"}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-remove-fixed-ip-req', subs)
+ self.assertEqual(response.status_code, 202)
+
+
+class MultinicSampleXmlTest(MultinicSampleJsonTest):
+ ctype = "xml"
+
+
+class InstanceUsageAuditLogJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "instance_usage_audit_log.Instance_usage_audit_log")
+
+ def test_show_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log/%s' %
+ urllib.quote('2012-07-05 10:00:00'))
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('inst-usage-audit-log-show-get-resp',
+ subs, response, 200)
+
+ def test_index_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('inst-usage-audit-log-index-get-resp',
+ subs, response, 200)
+
+
+class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest):
+ ctype = "xml"
+
+
+class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavorextraspecs."
+ "Flavorextraspecs")
+
+ def _flavor_extra_specs_create(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ response = self._do_post('flavors/1/os-extra_specs',
+ 'flavor-extra-specs-create-req', subs)
+ self._verify_response('flavor-extra-specs-create-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_get(self):
+ subs = {'value1': 'value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs/key1')
+ self._verify_response('flavor-extra-specs-get-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_list(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs')
+ self._verify_response('flavor-extra-specs-list-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_create(self):
+ self._flavor_extra_specs_create()
+
+ def test_flavor_extra_specs_update(self):
+ subs = {'value1': 'new_value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_put('flavors/1/os-extra_specs/key1',
+ 'flavor-extra-specs-update-req', subs)
+ self._verify_response('flavor-extra-specs-update-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_delete(self):
+ self._flavor_extra_specs_create()
+ response = self._do_delete('flavors/1/os-extra_specs/key1')
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.content, '')
+
+
+class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests):
+ ctype = 'xml'
+
+
+class FpingSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
+
+ def setUp(self):
+ super(FpingSampleJsonTests, self).setUp()
+
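+        # Replace utils.execute with the fake from test_fping and skip the
+        # check that the fping binary is installed.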
+ def fake_check_fping(self):
+ pass
+ self.stubs.Set(utils, "execute", test_fping.execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ fake_check_fping)
+
+ def test_get_fping(self):
+ self._post_server()
+ response = self._do_get('os-fping')
+ subs = self._get_regexes()
+ self._verify_response('fping-get-resp', subs, response, 200)
+
+ def test_get_fping_details(self):
+ uuid = self._post_server()
+ response = self._do_get('os-fping/%s' % (uuid))
+ subs = self._get_regexes()
+ self._verify_response('fping-get-details-resp', subs, response, 200)
+
+
+class FpingSampleXmlTests(FpingSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_availability_zone"
+ ".Extended_availability_zone")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedAvailabilityZoneXmlTests(ExtendedAvailabilityZoneJsonTests):
+ ctype = 'xml'
+
+
+class EvacuateJsonTest(ServersSampleBase):
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ def test_server_evacuate(self):
+ uuid = self._post_server()
+
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ def fake_service_is_up(self, service):
+ """Simulate validation of instance host is down."""
+ return False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ """Simulate that given host is a valid host."""
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ def fake_rebuild_instance(self, ctxt, instance, new_pass,
+ injected_files, image_ref, orig_image_ref,
+ orig_sys_metadata, bdms, recreate=False,
+ on_shared_storage=False, host=None,
+ preserve_ephemeral=False, kwargs=None):
+ return {
+ 'adminPass': new_pass
+ }
+
+ self.stubs.Set(service_group_api.API, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'rebuild_instance',
+ fake_rebuild_instance)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-evacuate-req', req_subs)
+ subs = self._get_regexes()
+ self._verify_response('server-evacuate-resp', subs, response, 200)
+
+
+class EvacuateXmlTest(EvacuateJsonTest):
+ ctype = 'xml'
+
+
+class EvacuateFindHostSampleJsonTest(ServersSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_evacuate_find_host.Extended_evacuate_find_host")
+
+ @mock.patch('nova.compute.manager.ComputeManager._check_instance_exists')
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock, service_get_mock,
+ check_instance_mock):
+ self.uuid = self._post_server()
+
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ check_instance_mock.return_value = False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+ service_get_mock.side_effect = fake_service_get_by_compute_host
+ with mock.patch.object(service_group_api.API, 'service_is_up',
+ return_value=False):
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-evacuate-find-host-req', req_subs)
+ subs = self._get_regexes()
+ self._verify_response('server-evacuate-find-host-resp', subs,
+ response, 200)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
+
+
+class EvacuateFindHostSampleXmlTests(EvacuateFindHostSampleJsonTest):
+ ctype = "xml"
+
+
+class FloatingIpDNSJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
+ "Floating_ip_dns")
+
+ domain = 'domain1.example.org'
+ name = 'instance1'
+ scope = 'public'
+ project = 'project1'
+ dns_type = 'A'
+ ip = '192.168.1.1'
+
+ def _create_or_update(self):
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ response = self._do_put('os-floating-ip-dns/%s' % self.domain,
+ 'floating-ip-dns-create-or-update-req', subs)
+ self._verify_response('floating-ip-dns-create-or-update-resp', subs,
+ response, 200)
+
+ def _create_or_update_entry(self):
+ subs = {'ip': self.ip, 'dns_type': self.dns_type}
+ response = self._do_put('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name),
+ 'floating-ip-dns-create-or-update-entry-req',
+ subs)
+ subs.update({'name': self.name, 'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-entry-resp',
+ subs, response, 200)
+
+ def test_floating_ip_dns_list(self):
+ self._create_or_update()
+ response = self._do_get('os-floating-ip-dns')
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ self._verify_response('floating-ip-dns-list-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_create_or_update(self):
+ self._create_or_update()
+
+ def test_floating_ip_dns_delete(self):
+ self._create_or_update()
+ response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_create_or_update_entry(self):
+ self._create_or_update_entry()
+
+ def test_floating_ip_dns_entry_get(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-get-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_entry_delete(self):
+ self._create_or_update_entry()
+ response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_entry_list(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.ip))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-list-resp', subs,
+ response, 200)
+
+
+class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
+ ctype = 'xml'
+
+
+class InstanceActionsSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = ('nova.api.openstack.compute.contrib.instance_actions.'
+ 'Instance_actions')
+
+ def setUp(self):
+ super(InstanceActionsSampleJsonTest, self).setUp()
+ self.actions = fake_server_actions.FAKE_ACTIONS
+ self.events = fake_server_actions.FAKE_EVENTS
+ self.instance = test_utils.get_test_instance()
+
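+        # Route the action, event and instance lookups to the canned
+        # FAKE_ACTIONS / FAKE_EVENTS data set up above.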
+ def fake_server_action_get_by_request_id(context, uuid, request_id):
+ return copy.deepcopy(self.actions[uuid][request_id])
+
+ def fake_server_actions_get(context, uuid):
+ return [copy.deepcopy(value) for value in
+ self.actions[uuid].itervalues()]
+
+ def fake_server_action_events_get(context, action_id):
+ return copy.deepcopy(self.events[action_id])
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return self.instance
+
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=True):
+ return {'uuid': instance_uuid}
+
+ self.stubs.Set(db, 'action_get_by_request_id',
+ fake_server_action_get_by_request_id)
+ self.stubs.Set(db, 'actions_get', fake_server_actions_get)
+ self.stubs.Set(db, 'action_events_get',
+ fake_server_action_events_get)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+
+ def test_instance_action_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_action = self.actions[fake_uuid][fake_request_id]
+
+ response = self._do_get('servers/%s/os-instance-actions/%s' %
+ (fake_uuid, fake_request_id))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['instance_uuid'] = fake_uuid
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = fake_action['request_id']
+ subs['start_time'] = fake_action['start_time']
+ subs['result'] = '(Success)|(Error)'
+ subs['event'] = '(schedule)|(compute_create)'
+ self._verify_response('instance-action-get-resp', subs, response, 200)
+
+ def test_instance_actions_list(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}')
+ self._verify_response('instance-actions-list-resp', subs,
+ response, 200)
+
+
+class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ImageSizeSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".image_size.Image_size")
+
+ def test_show(self):
+ # Get api sample of one single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('images-details-get-resp', subs, response, 200)
+
+
+class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
+ ctype = 'xml'
+
+
+class ConfigDriveSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.config_drive."
+ "Config_drive")
+
+ def setUp(self):
+ super(ConfigDriveSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+
+ def test_config_drive_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+        # config_drive can be a string for True or an empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('server-config-drive-get-resp', subs,
+ response, 200)
+
+ def test_config_drive_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+        # config_drive can be a string for True or an empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('servers-config-drive-details-resp',
+ subs, response, 200)
+
+
+class ConfigDriveSampleXmlTest(ConfigDriveSampleJsonTest):
+ ctype = 'xml'
+
+
+class FlavorAccessSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavor_access."
+ "Flavor_access")
+
+ def _get_flags(self):
+ f = super(FlavorAccessSampleJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorAccess extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def _add_tenant(self):
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ 'flavor_id': 10
+ }
+ response = self._do_post('flavors/10/action',
+ 'flavor-access-add-tenant-req',
+ subs)
+ self._verify_response('flavor-access-add-tenant-resp',
+ subs, response, 200)
+
+ def _create_flavor(self):
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': 'test_flavor'
+ }
+ response = self._do_post("flavors",
+ "flavor-access-create-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-access-create-resp", subs, response, 200)
+
+ def test_flavor_access_create(self):
+ self._create_flavor()
+
+ def test_flavor_access_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-access-detail-resp', subs, response, 200)
+
+ def test_flavor_access_list(self):
+ self._create_flavor()
+ self._add_tenant()
+ flavor_id = 10
+ response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'tenant_id': 'fake_tenant',
+ }
+ self._verify_response('flavor-access-list-resp', subs, response, 200)
+
+ def test_flavor_access_show(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-access-show-resp', subs, response, 200)
+
+ def test_flavor_access_add_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+
+ def test_flavor_access_remove_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ }
+ response = self._do_post('flavors/10/action',
+ "flavor-access-remove-tenant-req",
+ subs)
+ exp_subs = {
+ "tenant_id": self.api.project_id,
+ "flavor_id": "10"
+ }
+ self._verify_response('flavor-access-remove-tenant-resp',
+ exp_subs, response, 200)
+
+
+class FlavorAccessSampleXmlTests(FlavorAccessSampleJsonTests):
+ ctype = 'xml'
+
+
+@mock.patch.object(service_group_api.API, "service_is_up", lambda _: True)
+class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
+ "Hypervisors")
+
+ def test_hypervisors_list(self):
+ response = self._do_get('os-hypervisors')
+ self._verify_response('hypervisors-list-resp', {}, response, 200)
+
+ def test_hypervisors_search(self):
+ response = self._do_get('os-hypervisors/fake/search')
+ self._verify_response('hypervisors-search-resp', {}, response, 200)
+
+ def test_hypervisors_servers(self):
+ response = self._do_get('os-hypervisors/fake/servers')
+ self._verify_response('hypervisors-servers-resp', {}, response, 200)
+
+ def test_hypervisors_show(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-resp', subs, response, 200)
+
+ def test_hypervisors_statistics(self):
+ response = self._do_get('os-hypervisors/statistics')
+ self._verify_response('hypervisors-statistics-resp', {}, response, 200)
+
+ def test_hypervisors_uptime(self):
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ self.stubs.Set(compute_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = 1
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ subs = {
+ 'hypervisor_id': hypervisor_id,
+ }
+ self._verify_response('hypervisors-uptime-resp', subs, response, 200)
+
+
+class HypervisorsSampleXmlTests(HypervisorsSampleJsonTests):
+ ctype = "xml"
+
+
+class ExtendedHypervisorsJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "hypervisors.Hypervisors")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_hypervisors.Extended_hypervisors")
+
+ def test_hypervisors_show_with_ip(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-with-ip-resp',
+ subs, response, 200)
+
+
+class ExtendedHypervisorsXmlTest(ExtendedHypervisorsJsonTest):
+ ctype = "xml"
+
+
+class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "hypervisors.Hypervisors")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "hypervisor_status.Hypervisor_status")
+
+ def test_hypervisors_show_with_status(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-with-status-resp',
+ subs, response, 200)
+
+
+class HypervisorStatusXmlTest(HypervisorStatusJsonTest):
+ ctype = 'xml'
+
+
+@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
+ "Hypervisors")
+
+ def setUp(self):
+ self.flags(enable=True, cell_type='api', group='cells')
+ super(HypervisorsCellsSampleJsonTests, self).setUp()
+
+ def test_hypervisor_uptime(self, mocks):
+ fake_hypervisor = {'service': {'host': 'fake-mini',
+ 'disabled': False,
+ 'disabled_reason': None},
+ 'id': 1, 'hypervisor_hostname': 'fake-mini'}
+
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ def fake_compute_node_get(self, context, hyp):
+ return fake_hypervisor
+
+ self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
+ fake_compute_node_get)
+
+ self.stubs.Set(cells_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = fake_hypervisor['id']
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ subs = {'hypervisor_id': hypervisor_id}
+ self._verify_response('hypervisors-uptime-resp', subs, response, 200)
+
+
+class HypervisorsCellsSampleXmlTests(HypervisorsCellsSampleJsonTests):
+ ctype = "xml"
+
+
+class AttachInterfacesSampleJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.attach_interfaces.'
+ 'Attach_interfaces')
+
+ def setUp(self):
+ super(AttachInterfacesSampleJsonTest, self).setUp()
+
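+        # Canned neutron-style port data; fake_list_ports is keyed on the
+        # device_id kwarg (the instance uuid).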
+ def fake_list_ports(self, *args, **kwargs):
+ uuid = kwargs.get('device_id', None)
+ if not uuid:
+ raise exception.InstanceNotFound(instance_id=None)
+ port_data = {
+ "id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": uuid,
+ }
+ ports = {'ports': [port_data]}
+ return ports
+
+ def fake_show_port(self, context, port_id=None):
+ if not port_id:
+ raise exception.PortNotFound(port_id=None)
+ port_data = {
+ "id": port_id,
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
+ }
+ port = {'port': port_data}
+ return port
+
+ def fake_attach_interface(self, context, instance,
+ network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ network_id = "fake_net_uuid"
+ if not port_id:
+ port_id = "fake_port_uuid"
+ vif = fake_network_cache_model.new_vif()
+ vif['id'] = port_id
+ vif['network']['id'] = network_id
+ vif['network']['subnets'][0]['ips'][0] = requested_ip
+ return vif
+
+ def fake_detach_interface(self, context, instance, port_id):
+ pass
+
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['subnet_id'] = vanilla_regexes['uuid']
+ subs['net_id'] = vanilla_regexes['uuid']
+ subs['port_id'] = vanilla_regexes['uuid']
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ subs['ip_address'] = vanilla_regexes['ip']
+ return subs
+
+ def test_list_interfaces(self):
+ instance_uuid = self._post_server()
+ response = self._do_get('servers/%s/os-interface' % instance_uuid)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-list-resp', subs,
+ response, 200)
+
+ def _stub_show_for_instance(self, instance_uuid, port_id):
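+        # Re-stub show_port so the canned port reports the given instance
+        # as its device_id.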
+ show_port = network_api.API().show_port(None, port_id)
+ show_port['port']['device_id'] = instance_uuid
+ self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
+
+ def test_show_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ self._stub_show_for_instance(instance_uuid, port_id)
+ response = self._do_get('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': port_id,
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-show-resp', subs,
+ response, 200)
+
+ def test_create_interfaces(self, instance_uuid=None):
+ if instance_uuid is None:
+ instance_uuid = self._post_server()
+ subs = {
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'ip_address': '192.168.1.3',
+ 'port_state': 'ACTIVE',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ }
+ self._stub_show_for_instance(instance_uuid, subs['port_id'])
+ response = self._do_post('servers/%s/os-interface' % instance_uuid,
+ 'attach-interfaces-create-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('attach-interfaces-create-resp', subs,
+ response, 200)
+
+ def test_delete_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ response = self._do_delete('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class AttachInterfacesSampleXmlTest(AttachInterfacesSampleJsonTest):
+ ctype = 'xml'
+
+
+class SnapshotsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.volumes.Volumes"
+
+ create_subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
+ }
+
+ def setUp(self):
+ super(SnapshotsSampleJsonTests, self).setUp()
+ self.stubs.Set(cinder.API, "get_all_snapshots",
+ fakes.stub_snapshot_get_all)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
+
+ def _create_snapshot(self):
+ self.stubs.Set(cinder.API, "create_snapshot",
+ fakes.stub_snapshot_create)
+
+ response = self._do_post("os-snapshots",
+ "snapshot-create-req",
+ self.create_subs)
+ return response
+
+ def test_snapshots_create(self):
+ response = self._create_snapshot()
+ self.create_subs.update(self._get_regexes())
+ self._verify_response("snapshot-create-resp",
+ self.create_subs, response, 200)
+
+ def test_snapshots_delete(self):
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ fakes.stub_snapshot_delete)
+ self._create_snapshot()
+ response = self._do_delete('os-snapshots/100')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_snapshots_detail(self):
+ response = self._do_get('os-snapshots/detail')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-detail-resp', subs, response, 200)
+
+ def test_snapshots_list(self):
+ response = self._do_get('os-snapshots')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-list-resp', subs, response, 200)
+
+ def test_snapshots_show(self):
+ response = self._do_get('os-snapshots/100')
+ subs = {
+ 'snapshot_name': 'Default name',
+ 'description': 'Default description'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('snapshots-show-resp', subs, response, 200)
+
+
+class SnapshotsSampleXmlTests(SnapshotsSampleJsonTests):
+ ctype = "xml"
+
+
+class AssistedVolumeSnapshotsJsonTest(ApiSampleTestBaseV2):
+ """Assisted volume snapshots."""
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "assisted_volume_snapshots.Assisted_volume_snapshots")
+
+ def _create_assisted_snapshot(self, subs):
+ self.stubs.Set(compute_api.API, 'volume_snapshot_create',
+ fakes.stub_compute_volume_snapshot_create)
+
+ response = self._do_post("os-assisted-volume-snapshots",
+ "snapshot-create-assisted-req",
+ subs)
+ return response
+
+ def test_snapshots_create_assisted(self):
+ subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c',
+ 'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c',
+ 'type': 'qcow2',
+ 'new_file': 'new_file_name'
+ }
+ subs.update(self._get_regexes())
+ response = self._create_assisted_snapshot(subs)
+ self._verify_response("snapshot-create-assisted-resp",
+ subs, response, 200)
+
+ def test_snapshots_delete_assisted(self):
+ self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
+ fakes.stub_compute_volume_snapshot_delete)
+ snapshot_id = '100'
+ response = self._do_delete(
+ 'os-assisted-volume-snapshots/%s?delete_info='
+ '{"volume_id":"521752a6-acf6-4b2d-bc7a-119f9148cd8c"}'
+ % snapshot_id)
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, '')
+
+
+class AssistedVolumeSnapshotsXmlTest(AssistedVolumeSnapshotsJsonTest):
+ ctype = "xml"
+
+
+class VolumeAttachmentsSampleBase(ServersSampleBase):
+ def _stub_db_bdms_get_all_by_instance(self, server_id):
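+        # Return two fake volume-backed block device mappings for the given
+        # server instead of querying the database.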
+
+ def fake_bdms_get_all_by_instance(context, instance_uuid,
+ use_slave=False):
+ bdms = [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdc'})
+ ]
+ return bdms
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+
+ def _stub_compute_api_get(self):
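+        # Return a minimal fake instance (object or dict) without hitting
+        # the database.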
+
+ def fake_compute_api_get(self, context, instance_id,
+ want_objects=False, expected_attrs=None):
+ if want_objects:
+ return fake_instance.fake_instance_obj(
+ context, **{'uuid': instance_id})
+ else:
+ return {'uuid': instance_id}
+
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+
+
+class VolumeAttachmentsSampleJsonTest(VolumeAttachmentsSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+
+ def test_attach_volume_to_server(self):
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ device_name = '/dev/vdd'
+ bdm = objects.BlockDeviceMapping()
+ bdm['device_name'] = device_name
+ self.stubs.Set(compute_manager.ComputeManager,
+ "reserve_block_device_name",
+ lambda *a, **k: bdm)
+ self.stubs.Set(compute_manager.ComputeManager,
+ 'attach_volume',
+ lambda *a, **k: None)
+ self.stubs.Set(objects.BlockDeviceMapping, 'get_by_volume_id',
+ classmethod(lambda *a, **k: None))
+
+ volume = fakes.stub_volume_get(None, context.get_admin_context(),
+ 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
+ subs = {
+ 'volume_id': volume['id'],
+ 'device': device_name
+ }
+ server_id = self._post_server()
+ response = self._do_post('servers/%s/os-volume_attachments'
+ % server_id,
+ 'attach-volume-to-server-req', subs)
+
+ subs.update(self._get_regexes())
+ self._verify_response('attach-volume-to-server-resp', subs,
+ response, 200)
+
+ def test_list_volume_attachments(self):
+ server_id = self._post_server()
+
+ self._stub_db_bdms_get_all_by_instance(server_id)
+
+ response = self._do_get('servers/%s/os-volume_attachments'
+ % server_id)
+ subs = self._get_regexes()
+ self._verify_response('list-volume-attachments-resp', subs,
+ response, 200)
+
+ def test_volume_attachment_detail(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ response = self._do_get('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id))
+ subs = self._get_regexes()
+ self._verify_response('volume-attachment-detail-resp', subs,
+ response, 200)
+
+ def test_volume_attachment_delete(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
+ response = self._do_delete('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumeAttachmentsSampleXmlTest(VolumeAttachmentsSampleJsonTest):
+ ctype = 'xml'
+
+
+class VolumeAttachUpdateSampleJsonTest(VolumeAttachmentsSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "volume_attachment_update.Volume_attachment_update")
+
+ def test_volume_attachment_update(self):
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ subs = {
+ 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f805',
+ 'device': '/dev/sdd'
+ }
+ server_id = self._post_server()
+ attach_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'swap_volume', lambda *a, **k: None)
+ response = self._do_put('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id),
+ 'update-volume-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumeAttachUpdateSampleXmlTest(VolumeAttachUpdateSampleJsonTest):
+ ctype = 'xml'
+
+
+class VolumesSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+
+ def _get_volume_id(self):
+ return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+
+ def _stub_volume(self, id, displayname="Volume Name",
+ displaydesc="Volume Description", size=100):
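+        # Canned cinder volume record shared by the get/get_all/create
+        # stubs below.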
+ volume = {
+ 'id': id,
+ 'size': size,
+ 'availability_zone': 'zone1:host1',
+ 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
+ 'mountpoint': '/',
+ 'status': 'in-use',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': displayname,
+ 'display_description': displaydesc,
+ 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'Backup'}
+ }
+ return volume
+
+ def _stub_volume_get(self, context, volume_id):
+ return self._stub_volume(volume_id)
+
+ def _stub_volume_delete(self, context, *args, **param):
+ pass
+
+ def _stub_volume_get_all(self, context, search_opts=None):
+ id = self._get_volume_id()
+ return [self._stub_volume(id)]
+
+ def _stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ id = self._get_volume_id()
+ return self._stub_volume(id)
+
+ def setUp(self):
+ super(VolumesSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
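+        # Stub out the cinder API calls that the volume samples rely on.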
+ self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", self._stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
+
+ def _post_volume(self):
+ subs_req = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+
+ self.stubs.Set(cinder.API, "create", self._stub_volume_create)
+ response = self._do_post('os-volumes', 'os-volumes-post-req',
+ subs_req)
+ subs = self._get_regexes()
+ subs.update(subs_req)
+ self._verify_response('os-volumes-post-resp', subs, response, 200)
+
+ def test_volumes_show(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ vol_id = self._get_volume_id()
+ response = self._do_get('os-volumes/%s' % vol_id)
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-get-resp', subs, response, 200)
+
+ def test_volumes_index(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-index-resp', subs, response, 200)
+
+ def test_volumes_detail(self):
+ # For now, index and detail are the same.
+ # See the volumes api
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes/detail')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-detail-resp', subs, response, 200)
+
+ def test_volumes_create(self):
+ self._post_volume()
+
+ def test_volumes_delete(self):
+ self._post_volume()
+ vol_id = self._get_volume_id()
+ response = self._do_delete('os-volumes/%s' % vol_id)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumesSampleXmlTest(VolumesSampleJsonTest):
+ ctype = 'xml'
+
+
+class MigrationsSamplesJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.migrations."
+ "Migrations")
+
+ def _stub_migrations(self, context, filters):
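+        # Return two canned migration records for the sample responses.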
+ fake_migrations = [
+ {
+ 'id': 1234,
+ 'source_node': 'node1',
+ 'dest_node': 'node2',
+ 'source_compute': 'compute1',
+ 'dest_compute': 'compute2',
+ 'dest_host': '1.2.3.4',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_123',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ },
+ {
+ 'id': 5678,
+ 'source_node': 'node10',
+ 'dest_node': 'node20',
+ 'source_compute': 'compute10',
+ 'dest_compute': 'compute20',
+ 'dest_host': '5.6.7.8',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_456',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 6,
+ 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ }
+ ]
+ return fake_migrations
+
+ def setUp(self):
+ super(MigrationsSamplesJsonTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_migrations',
+ self._stub_migrations)
+
+ def test_get_migrations(self):
+ response = self._do_get('os-migrations')
+ subs = self._get_regexes()
+
+ self.assertEqual(response.status_code, 200)
+ self._verify_response('migrations-get', subs, response, 200)
+
+
+class MigrationsSamplesXmlTest(MigrationsSamplesJsonTest):
+ ctype = 'xml'
+
+
+class PreserveEphemeralOnRebuildJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.'
+ 'preserve_ephemeral_rebuild.'
+ 'Preserve_ephemeral_rebuild')
+
+ def _test_server_action(self, uuid, action,
+ subs=None, resp_tpl=None, code=202):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-%s' % action.lower(),
+ subs)
+ if resp_tpl:
+ subs.update(self._get_regexes())
+ self._verify_response(resp_tpl, subs, response, code)
+ else:
+ self.assertEqual(response.status_code, code)
+ self.assertEqual(response.content, "")
+
+ def test_rebuild_server_preserve_ephemeral_false(self):
+ uuid = self._post_server()
+ image = self.api.get_images()[0]['id']
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'ip': '1.2.3.4',
+ 'ip6': 'fe80::100',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': 'false'}
+ self._test_server_action(uuid, 'rebuild', subs,
+ 'server-action-rebuild-resp')
+
+ def test_rebuild_server_preserve_ephemeral_true(self):
+ image = self.api.get_images()[0]['id']
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'new-server-test',
+ 'pass': 'seekr3t',
+ 'ip': '1.2.3.4',
+ 'ip6': 'fe80::100',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': 'true'}
+
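+        # Stub out the compute API's rebuild so the test can assert that the
+        # preserve_ephemeral flag is passed through to the compute layer.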
+ def fake_rebuild(self_, context, instance, image_href, admin_password,
+ **kwargs):
+ self.assertTrue(kwargs['preserve_ephemeral'])
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ instance_uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % instance_uuid,
+ 'server-action-rebuild', subs)
+ self.assertEqual(response.status_code, 202)
+
+
+class PreserveEphemeralOnRebuildXmlTest(PreserveEphemeralOnRebuildJsonTest):
+ ctype = 'xml'
+
+
+class ServerExternalEventsJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.'
+ 'server_external_events.Server_external_events')
+
+ def test_create_event(self):
+ instance_uuid = self._post_server()
+ subs = {
+ 'uuid': instance_uuid,
+ 'name': 'network-changed',
+ 'status': 'completed',
+ 'tag': 'foo',
+ }
+ response = self._do_post('os-server-external-events',
+ 'event-create-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('event-create-resp', subs, response, 200)
+
+
+class ServerExternalEventsXmlTest(ServerExternalEventsJsonTest):
+ ctype = 'xml'
+
+
+class ServerGroupsSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".server_groups.Server_groups")
+
+ def _get_create_subs(self):
+ return {'name': 'test'}
+
+ def _post_server_group(self):
+        """Verify the response status code and return the UUID of the
+        newly created server group.
+        """
+ subs = self._get_create_subs()
+ response = self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+ subs = self._get_regexes()
+ subs['name'] = 'test'
+ return self._verify_response('server-groups-post-resp',
+ subs, response, 200)
+
+ def _create_server_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+
+ def test_server_groups_post(self):
+ return self._post_server_group()
+
+ def test_server_groups_list(self):
+ subs = self._get_create_subs()
+ uuid = self._post_server_group()
+ response = self._do_get('os-server-groups')
+ subs.update(self._get_regexes())
+ subs['id'] = uuid
+ self._verify_response('server-groups-list-resp',
+ subs, response, 200)
+
+ def test_server_groups_get(self):
+ # Get api sample of server groups get request.
+ subs = {'name': 'test'}
+ uuid = self._post_server_group()
+ subs['id'] = uuid
+ response = self._do_get('os-server-groups/%s' % uuid)
+
+ self._verify_response('server-groups-get-resp', subs, response, 200)
+
+ def test_server_groups_delete(self):
+ uuid = self._post_server_group()
+ response = self._do_delete('os-server-groups/%s' % uuid)
+ self.assertEqual(response.status_code, 204)
+
+
+class ServerGroupsSampleXmlTest(ServerGroupsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ServerGroupQuotas_LimitsSampleJsonTest(LimitsSampleJsonTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+
+
+class ServerGroupQuotas_LimitsSampleXmlTest(LimitsSampleXmlTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+
+
+class ServerGroupQuotas_UsedLimitsSamplesJsonTest(UsedLimitsSamplesJsonTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+
+
+class ServerGroupQuotas_UsedLimitsSamplesXmlTest(UsedLimitsSamplesXmlTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+
+
+class ServerGroupQuotas_QuotasSampleJsonTests(QuotasSampleJsonTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+
+class ServerGroupQuotas_QuotasSampleXmlTests(QuotasSampleXmlTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+
+class ServerGroupQuotasQuota_ClassesSampleJsonTests(
+ QuotaClassesSampleJsonTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
+ "Quota_classes")
+
+
+class ServerGroupQuotas_QuotaClassesSampleXmlTests(
+ QuotaClassesSampleXmlTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
+ "Quota_classes")
diff --git a/nova/tests/unit/integrated/test_extensions.py b/nova/tests/unit/integrated/test_extensions.py
new file mode 100644
index 0000000000..927b51b453
--- /dev/null
+++ b/nova/tests/unit/integrated/test_extensions.py
@@ -0,0 +1,42 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated import integrated_helpers
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class ExtensionsTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+
+ def _get_flags(self):
+ f = super(ExtensionsTest, self)._get_flags()
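+        # Start from the default extension list and add the Foxinsocks test
+        # extension on top of it.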
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ f['osapi_compute_extension'].append(
+ 'nova.tests.unit.api.openstack.compute.extensions.'
+ 'foxinsocks.Foxinsocks')
+ return f
+
+ def test_get_foxnsocks(self):
+ # Simple check that fox-n-socks works.
+ response = self.api.api_request('/foxnsocks')
+ foxnsocks = response.content
+ LOG.debug("foxnsocks: %s" % foxnsocks)
+ self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
diff --git a/nova/tests/unit/integrated/test_login.py b/nova/tests/unit/integrated/test_login.py
new file mode 100644
index 0000000000..851282000d
--- /dev/null
+++ b/nova/tests/unit/integrated/test_login.py
@@ -0,0 +1,36 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated.api import client
+from nova.tests.unit.integrated import integrated_helpers
+
+
+LOG = logging.getLogger(__name__)
+
+
+class LoginTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+
+ def test_login(self):
+ # Simple check - we list flavors - so we know we're logged in.
+ flavors = self.api.get_flavors()
+ for flavor in flavors:
+ LOG.debug("flavor: %s", flavor)
+
+
+class LoginTestV3(client.TestOpenStackClientV3Mixin, LoginTest):
+ _api_version = 'v3'
diff --git a/nova/tests/unit/integrated/test_servers.py b/nova/tests/unit/integrated/test_servers.py
new file mode 100644
index 0000000000..97f80d7813
--- /dev/null
+++ b/nova/tests/unit/integrated/test_servers.py
@@ -0,0 +1,522 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import time
+import zlib
+
+from oslo.utils import timeutils
+
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit import fake_network
+from nova.tests.unit.integrated.api import client
+from nova.tests.unit.integrated import integrated_helpers
+import nova.virt.fake
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ServersTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+ _force_delete_parameter = 'forceDelete'
+ _image_ref_parameter = 'imageRef'
+ _flavor_ref_parameter = 'flavorRef'
+ _access_ipv4_parameter = 'accessIPv4'
+ _access_ipv6_parameter = 'accessIPv6'
+ _return_resv_id_parameter = 'return_reservation_id'
+ _min_count_parameter = 'min_count'
+
+ def setUp(self):
+ super(ServersTest, self).setUp()
+ self.conductor = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+
+ def _wait_for_state_change(self, server, from_status):
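+        # Poll the server (up to 50 times, ~5 seconds total) until its status
+        # moves away from from_status, then return the latest view of it.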
+ for i in xrange(0, 50):
+ server = self.api.get_server(server['id'])
+ if server['status'] != from_status:
+ break
+ time.sleep(.1)
+
+ return server
+
+ def _restart_compute_service(self, *args, **kwargs):
+        """Restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ self.compute = self.start_service('compute', *args, **kwargs)
+
+ def test_get_servers(self):
+ # Simple check that listing servers works.
+ servers = self.api.get_servers()
+ for server in servers:
+ LOG.debug("server: %s" % server)
+
+ def test_create_server_with_error(self):
+ # Create a server which will enter error state.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ def throw_error(*args, **kwargs):
+ raise exception.BuildAbortException(reason='',
+ instance_uuid='fake')
+
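+        # Make the fake driver's spawn fail so the build aborts and the
+        # instance ends up in the ERROR state.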
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
+
+ server = self._build_minimal_create_server_request()
+ created_server = self.api.post_server({"server": server})
+ created_server_id = created_server['id']
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+
+ self.assertEqual('ERROR', found_server['status'])
+ self._delete_server(created_server_id)
+
+ def test_create_and_delete_server(self):
+ # Creates and deletes a server.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ # Build the server data gradually, checking errors along the way
+ server = {}
+ good_server = self._build_minimal_create_server_request()
+
+ post = {'server': server}
+
+ # Without an imageRef, this throws 500.
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # With an invalid imageRef, this throws 500.
+ server[self._image_ref_parameter] = self.get_invalid_image()
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Add a valid imageRef
+ server[self._image_ref_parameter] = good_server.get(
+ self._image_ref_parameter)
+
+ # Without flavorRef, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ server[self._flavor_ref_parameter] = good_server.get(
+ self._flavor_ref_parameter)
+
+ # Without a name, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid server name
+ server['name'] = good_server['name']
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ # It should also be in the all-servers list
+ servers = self.api.get_servers()
+ server_ids = [s['id'] for s in servers]
+ self.assertIn(created_server_id, server_ids)
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ # It should be available...
+ # TODO(justinsb): Mock doesn't yet do this...
+ self.assertEqual('ACTIVE', found_server['status'])
+ servers = self.api.get_servers(detail=True)
+ for server in servers:
+ self.assertIn("image", server)
+ self.assertIn("flavor", server)
+
+ self._delete_server(created_server_id)
+
+ def _force_reclaim(self):
+ # Make sure that compute manager thinks the instance is
+ # old enough to be expired
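+        # The override pushes "now" one hour ahead, so the soft-deleted
+        # instance looks older than the short reclaim_instance_interval
+        # used by test_deferred_delete.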
+ the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
+ timeutils.set_time_override(override_time=the_past)
+ ctxt = context.get_admin_context()
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_deferred_delete(self):
+ # Creates, deletes and waits for server to be reclaimed.
+ self.flags(reclaim_instance_interval=1)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot restore unless instance is deleted
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, created_server_id,
+ {'restore': {}})
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ self._force_reclaim()
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def test_deferred_delete_restore(self):
+ # Creates, deletes and restores a server.
+ self.flags(reclaim_instance_interval=3600)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ # Restore server
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Wait for server to become active again
+ found_server = self._wait_for_state_change(found_server, 'DELETED')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ def test_deferred_delete_force(self):
+ # Creates, deletes and force deletes a server.
+ self.flags(reclaim_instance_interval=3600)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ # Force delete server
+ self.api.post_server_action(created_server_id,
+ {self._force_delete_parameter: {}})
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def _wait_for_deletion(self, server_id):
+ # Wait (briefly) for deletion
+ for _retries in range(50):
+ try:
+ found_server = self.api.get_server(server_id)
+ except client.OpenStackApiNotFoundException:
+ found_server = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found_server=%s" % found_server)
+
+ # TODO(justinsb): Mock doesn't yet do accurate state changes
+ # if found_server['status'] != 'deleting':
+ # break
+ time.sleep(.1)
+
+ # Should be gone
+ self.assertFalse(found_server)
+
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+ self._wait_for_deletion(server_id)
+
+ def test_create_server_with_metadata(self):
+ # Creates a server with metadata.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Build the server data gradually, checking errors along the way
+ server = self._build_minimal_create_server_request()
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server['metadata'] = metadata
+
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers details list
+ servers = self.api.get_servers(detail=True)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Details do include metadata
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers summary list
+ servers = self.api.get_servers(detail=False)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Summary should not include metadata
+ self.assertFalse(found_server.get('metadata'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def test_create_and_rebuild_server(self):
+ # Rebuild a server with metadata.
+ fake_network.set_stub_network_methods(self.stubs)
+
+        # Create a server which initially has no metadata
+ server = self._build_minimal_create_server_request()
+ server_post = {'server': server}
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server_post['server']['metadata'] = metadata
+
+ created_server = self.api.post_server(server_post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # rebuild the server with metadata and other server attributes
+ post = {}
+ post['rebuild'] = {
+ self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "name": "blah",
+ self._access_ipv4_parameter: "172.19.0.2",
+ self._access_ipv6_parameter: "fe80::2",
+ "metadata": {'some': 'thing'},
+ }
+ post['rebuild'].update(self._get_access_ips_params())
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual(post['rebuild'][self._image_ref_parameter],
+ found_server.get('image')['id'])
+ self._verify_access_ips(found_server)
+
+ # rebuild the server with empty metadata and nothing else
+ post = {}
+ post['rebuild'] = {
+ self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "metadata": {},
+ }
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual({}, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual(post['rebuild'][self._image_ref_parameter],
+ found_server.get('image')['id'])
+ self._verify_access_ips(found_server)
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def _get_access_ips_params(self):
+ return {self._access_ipv4_parameter: "172.19.0.2",
+ self._access_ipv6_parameter: "fe80::2"}
+
+ def _verify_access_ips(self, server):
+ self.assertEqual('172.19.0.2',
+ server[self._access_ipv4_parameter])
+ self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
+
+ def test_rename_server(self):
+ # Test building and renaming a server.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create a server
+ server = self._build_minimal_create_server_request()
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ server_id = created_server['id']
+ self.assertTrue(server_id)
+
+ # Rename the server to 'new-name'
+ self.api.put_server(server_id, {'server': {'name': 'new-name'}})
+
+ # Check the name of the server
+ created_server = self.api.get_server(server_id)
+ self.assertEqual(created_server['name'], 'new-name')
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ def test_create_multiple_servers(self):
+ # Creates multiple servers and checks for reservation_id.
+
+        # Create 2 servers, setting 'return_reservation_id', which should
+        # return a reservation_id
+ server = self._build_minimal_create_server_request()
+ server[self._min_count_parameter] = 2
+ server[self._return_resv_id_parameter] = True
+ post = {'server': server}
+ response = self.api.post_server(post)
+ self.assertIn('reservation_id', response)
+ reservation_id = response['reservation_id']
+ self.assertNotIn(reservation_id, ['', None])
+
+ # Create 1 more server, which should not return a reservation_id
+ server = self._build_minimal_create_server_request()
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+        # Look up the servers created by the first request.
+ servers = self.api.get_servers(detail=True,
+ search_opts={'reservation_id': reservation_id})
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ # The server from the 2nd request should not be there.
+ self.assertIsNone(found_server)
+ # Should have found 2 servers.
+ self.assertEqual(len(server_map), 2)
+
+ # Cleanup
+ self._delete_server(created_server_id)
+ for server_id in server_map.iterkeys():
+ self._delete_server(server_id)
+
+ def test_create_server_with_injected_files(self):
+ # Creates a server with injected_files.
+ fake_network.set_stub_network_methods(self.stubs)
+ personality = []
+
+ # Inject a text file
+ data = 'Hello, World!'
+ personality.append({
+ 'path': '/helloworld.txt',
+ 'contents': data.encode('base64'),
+ })
+
+ # Inject a binary file
+ data = zlib.compress('Hello, World!')
+ personality.append({
+ 'path': '/helloworld.zip',
+ 'contents': data.encode('base64'),
+ })
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+ server['personality'] = personality
+
+ post = {'server': server}
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+
+class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
+ _force_delete_parameter = 'forceDelete'
+ _api_version = 'v3'
+ _image_ref_parameter = 'imageRef'
+ _flavor_ref_parameter = 'flavorRef'
+ _access_ipv4_parameter = None
+ _access_ipv6_parameter = None
+
+ def _get_access_ips_params(self):
+ return {}
+
+ def _verify_access_ips(self, server):
+ # NOTE(alexxu): access_ips was demoted as extensions in v3 api.
+ # So skips verifying access_ips
+ pass
diff --git a/nova/tests/unit/integrated/test_xml.py b/nova/tests/unit/integrated/test_xml.py
new file mode 100644
index 0000000000..822a1db88b
--- /dev/null
+++ b/nova/tests/unit/integrated/test_xml.py
@@ -0,0 +1,51 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack import common
+from nova.api.openstack import xmlutil
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated import integrated_helpers
+
+
+LOG = logging.getLogger(__name__)
+
+
+class XmlTests(integrated_helpers._IntegratedTestBase):
+    """Some basic XML sanity checks."""
+
+ _api_version = 'v2'
+
+ def test_namespace_limits(self):
+ headers = {}
+ headers['Accept'] = 'application/xml'
+
+ response = self.api.api_request('/limits', headers=headers)
+ data = response.content
+ LOG.debug("data: %s" % data)
+ root = etree.XML(data)
+ self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
+
+ def test_namespace_servers(self):
+ # /servers should have v1.1 namespace (has changed in 1.1).
+ headers = {}
+ headers['Accept'] = 'application/xml'
+
+ response = self.api.api_request('/servers', headers=headers)
+ data = response.content
+ LOG.debug("data: %s" % data)
+ root = etree.XML(data)
+ self.assertEqual(root.nsmap.get(None), common.XML_NS_V11)
diff --git a/nova/tests/unit/integrated/v3/__init__.py b/nova/tests/unit/integrated/v3/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/__init__.py
diff --git a/nova/tests/unit/integrated/v3/api_sample_base.py b/nova/tests/unit/integrated/v3/api_sample_base.py
new file mode 100644
index 0000000000..dad804328d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_sample_base.py
@@ -0,0 +1,79 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from oslo.config import cfg
+
+from nova.api.openstack import API_V3_CORE_EXTENSIONS # noqa
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_utils
+from nova.tests.unit.integrated import api_samples_test_base
+
+CONF = cfg.CONF
+
+
+class ApiSampleTestBaseV3(api_samples_test_base.ApiSampleTestBase):
+ _api_version = 'v3'
+ sample_dir = None
+ extra_extensions_to_load = None
+
+ def setUp(self):
+ self.flags(use_ipv6=False,
+ osapi_compute_link_prefix=self._get_host(),
+ osapi_glance_link_prefix=self._get_glance_host())
+ if not self.all_extensions:
+            # Set the whitelist so that only the extensions under test are
+            # loaded and the api samples do not include data from unrelated
+            # extensions.
+ whitelist = API_V3_CORE_EXTENSIONS.copy()
+ if self.extension_name:
+ whitelist.add(self.extension_name)
+ if self.extra_extensions_to_load:
+ whitelist.update(set(self.extra_extensions_to_load))
+
+ CONF.set_override('extensions_whitelist', whitelist,
+ 'osapi_v3')
+
+ super(ApiSampleTestBaseV3, self).setUp()
+ self.useFixture(test.SampleNetworks(host=self.network.host))
+ fake_network.stub_compute_with_ips(self.stubs)
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
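+        # Regenerate the stored api sample files only when the
+        # GENERATE_SAMPLES environment variable is set.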
+ self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
+
+ @classmethod
+ def _get_sample_path(cls, name, dirname, suffix=''):
+ parts = [dirname]
+ parts.append('api_samples')
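+        # Sample directory precedence: all_extensions first, then an explicit
+        # sample_dir, then the extension name itself.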
+ if cls.all_extensions:
+ parts.append('all_extensions')
+ elif cls.sample_dir:
+ parts.append(cls.sample_dir)
+ elif cls.extension_name:
+ parts.append(cls.extension_name)
+ parts.append(name + "." + cls.ctype + suffix)
+ return os.path.join(*parts)
+
+ @classmethod
+ def _get_sample(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ dirname = os.path.normpath(os.path.join(dirname,
+ "../../../../../doc/v3"))
+ return cls._get_sample_path(name, dirname)
+
+ @classmethod
+ def _get_template(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ return cls._get_sample_path(name, dirname, suffix='.tpl')
diff --git a/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
new file mode 100644
index 0000000000..add1a44c32
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
@@ -0,0 +1,76 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "os-pci:pci_devices": [{"id": 1}],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
new file mode 100644
index 0000000000..6f1d0b498e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "accessIPv4": "",
+ "accessIPv6": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..1e9edd0592
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "os-pci:pci_devices": [{"id": 1}],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..10a98858bf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl
diff --git a/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl
new file mode 100644
index 0000000000..a2a6de6ed4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl
@@ -0,0 +1 @@
+{"console": {"console_type": "fake", "port": 5999, "instance_name": "instance-00000001", "host": "fake", "password": "%(password)s", "id": 1}} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl
new file mode 100644
index 0000000000..9d908ad123
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl
@@ -0,0 +1 @@
+{"consoles": [{"console_type": "fake", "id": 1}]} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl
new file mode 100644
index 0000000000..75d286fb03
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "extension": {
+ "alias": "flavors",
+ "description": "Flavors Extension.",
+ "name": "Flavors",
+ "version": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl
new file mode 100644
index 0000000000..8ddbe20ac7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "extensions": [
+ {
+ "alias": "extensions",
+ "description": "Extension information.",
+ "name": "Extensions",
+ "version": 1
+ },
+ {
+ "alias": "flavors",
+ "description": "Flavors Extension.",
+ "name": "Flavors",
+ "version": 1
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl
new file mode 100644
index 0000000000..94f5439e04
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addTenantAccess": {
+ "tenant": "%(tenant_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl
new file mode 100644
index 0000000000..d797155795
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "flavor_access": [
+ {
+ "flavor_id": "%(flavor_id)s",
+ "tenant_id": "%(tenant_id)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl
new file mode 100644
index 0000000000..02ac4e695d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "os-flavor-access:is_public": false
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl
new file mode 100644
index 0000000000..bd01300043
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl
@@ -0,0 +1,23 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "os-flavor-access:is_public": false,
+ "ram": 1024,
+ "vcpus": 2,
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "swap": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl
new file mode 100644
index 0000000000..5d593b4d62
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl
@@ -0,0 +1,109 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "os-flavor-access:is_public": true,
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "os-flavor-access:is_public": true,
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "os-flavor-access:is_public": true,
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "os-flavor-access:is_public": true,
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl
new file mode 100644
index 0000000000..a6b6dbdcda
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "flavor_access": [
+ {
+ "flavor_id": "%(flavor_id)s",
+ "tenant_id": "fake_tenant"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl
new file mode 100644
index 0000000000..20711e02b4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeTenantAccess": {
+ "tenant": "%(tenant_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl
new file mode 100644
index 0000000000..5cab03334d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "flavor_access": []
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl
new file mode 100644
index 0000000000..255b122b7a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl
@@ -0,0 +1,23 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "vcpus": 1,
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "swap": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
new file mode 100644
index 0000000000..dd858e76c5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
new file mode 100644
index 0000000000..adfa77008f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl
new file mode 100644
index 0000000000..5383e5d15e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl
new file mode 100644
index 0000000000..3f4690b5bf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl
@@ -0,0 +1,23 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "os-flavor-access:is_public": true,
+ "ram": 1024,
+ "vcpus": 2,
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "swap": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl
new file mode 100644
index 0000000000..5f8a90b5f6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl
@@ -0,0 +1,23 @@
+{
+ "flavor": {
+ "disk": 1,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "vcpus": 1,
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "swap": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl
new file mode 100644
index 0000000000..5d593b4d62
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl
@@ -0,0 +1,109 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "os-flavor-access:is_public": true,
+ "ram": 2048,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "os-flavor-access:is_public": true,
+ "ram": 4096,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "os-flavor-access:is_public": true,
+ "ram": 8192,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "os-flavor-access:is_public": true,
+ "ram": 16384,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl
new file mode 100644
index 0000000000..fed9966909
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl
@@ -0,0 +1,74 @@
+{
+ "flavors": [
+ {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny"
+ },
+ {
+ "id": "2",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small"
+ },
+ {
+ "id": "3",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium"
+ },
+ {
+ "id": "4",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large"
+ },
+ {
+ "id": "5",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl
new file mode 100644
index 0000000000..9a5ebfbc11
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl
new file mode 100644
index 0000000000..2eba334009
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl
new file mode 100644
index 0000000000..57ae88548d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl
@@ -0,0 +1,33 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl
new file mode 100644
index 0000000000..6d022eb97d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "kernel_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl
new file mode 100644
index 0000000000..01528f1ce6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "auto_disk_config": "False"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl
new file mode 100644
index 0000000000..3db563ec14
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "auto_disk_config": "False"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl
new file mode 100644
index 0000000000..588f688d5a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl
new file mode 100644
index 0000000000..b51e5f00fc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "kernel_id": "False",
+ "Label": "UpdatedImage"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl
new file mode 100644
index 0000000000..9479bb3395
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "metadata": {
+ "Label": "UpdatedImage",
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "False",
+ "ramdisk_id": "nokernel"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl
new file mode 100644
index 0000000000..eec6152d77
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "auto_disk_config": "True",
+ "Label": "Changed"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl
new file mode 100644
index 0000000000..c8c5ee9c4a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "metadata": {
+ "Label": "Changed",
+ "auto_disk_config": "True"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl
new file mode 100644
index 0000000000..df8ecad0b8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl
@@ -0,0 +1,212 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl
new file mode 100644
index 0000000000..32ebd60cfa
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl
@@ -0,0 +1,137 @@
+{
+ "images": [
+ {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage7"
+ },
+ {
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage6"
+ },
+ {
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl
new file mode 100644
index 0000000000..4fde60f14b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "keypair": {
+ "public_key": "%(public_key)s",
+ "name": "%(keypair_name)s",
+ "fingerprint": "%(fingerprint)s",
+ "user_id": "fake",
+ "deleted": false,
+ "created_at": "%(strtime)s",
+ "updated_at": null,
+ "deleted_at": null,
+ "id": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl
new file mode 100644
index 0000000000..2301fa05b2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl
new file mode 100644
index 0000000000..ca7192d5dc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl
new file mode 100644
index 0000000000..29ba63c00b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "public_key": "%(public_key)s"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl
new file mode 100644
index 0000000000..68e2f03487
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl
new file mode 100644
index 0000000000..aace6f5ccc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "private_key": "%(private_key)s",
+ "public_key": "%(public_key)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..e9b7921f30
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(image_id)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..603363b409
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
@@ -0,0 +1,17 @@
+{
+ "rebuild" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "name" : "new-server-test",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
new file mode 100644
index 0000000000..efe7801174
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
new file mode 100644
index 0000000000..780f764cf5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
@@ -0,0 +1,18 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
new file mode 100644
index 0000000000..fb0c23b504
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl
new file mode 100644
index 0000000000..d38d967042
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
new file mode 100644
index 0000000000..b3e8c665e8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..041f1a1056
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
@@ -0,0 +1,59 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8797266b68
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
new file mode 100644
index 0000000000..62e16737b0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
@@ -0,0 +1,3 @@
+{
+ "injectNetworkInfo": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
new file mode 100644
index 0000000000..7c79cb68a5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
@@ -0,0 +1,3 @@
+{
+ "resetNetwork": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
new file mode 100644
index 0000000000..013aed4824
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-resetState": {
+ "state": "active"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
new file mode 100644
index 0000000000..72d9478678
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-resetState": {
+ "state": "active"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
new file mode 100644
index 0000000000..da615718fe
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
@@ -0,0 +1,5 @@
+{
+ "changePassword" : {
+ "adminPass" : "%(password)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl
new file mode 100644
index 0000000000..6dbd2f17cb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "agent": {
+ "hypervisor": "%(hypervisor)s",
+ "os": "%(os)s",
+ "architecture": "%(architecture)s",
+ "version": "%(version)s",
+ "md5hash": "%(md5hash)s",
+ "url": "%(url)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
new file mode 100644
index 0000000000..24ddede90b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "agent": {
+ "agent_id": 1,
+ "architecture": "x86",
+ "hypervisor": "hypervisor",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "os": "os",
+ "url": "http://example.com/path/to/resource",
+ "version": "8.0"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl
new file mode 100644
index 0000000000..d447350e0d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "para": {
+ "url": "%(url)s",
+ "md5hash": "%(md5hash)s",
+ "version": "%(version)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
new file mode 100644
index 0000000000..2919d21388
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "agent": {
+ "agent_id": "1",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "url": "http://example.com/path/to/resource",
+ "version": "7.0"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
new file mode 100644
index 0000000000..92e14e1dc5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "agents": [
+ {
+ "agent_id": 1,
+ "architecture": "x86",
+ "hypervisor": "hypervisor",
+ "md5hash": "add6bb58e139be103324d04d82d8f545",
+ "os": "os",
+ "url": "http://example.com/path/to/resource",
+ "version": "8.0"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
new file mode 100644
index 0000000000..97395bf2f2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "add_host": {
+ "host": "%(host_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
new file mode 100644
index 0000000000..63a2921cac
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "set_metadata":
+ {
+ "metadata":
+ {
+ "key": "value"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl
new file mode 100644
index 0000000000..fc806061e8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "name",
+ "availability_zone": "nova"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl
new file mode 100644
index 0000000000..935643d03c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "id": %(aggregate_id)s,
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
new file mode 100644
index 0000000000..4663e52931
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "remove_host": {
+ "host": "%(host_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
new file mode 100644
index 0000000000..55e4b09346
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "newname",
+ "availability_zone": "nova2"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
new file mode 100644
index 0000000000..2e229a473a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova2",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova2"
+ },
+ "name": "newname",
+ "updated_at": "%(strtime)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
new file mode 100644
index 0000000000..e5775c206d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [
+ "%(compute_host)s"
+ ],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl
new file mode 100644
index 0000000000..b91781fae2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
new file mode 100644
index 0000000000..642653d1e6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "aggregates": [
+ {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
new file mode 100644
index 0000000000..b15c40fa5d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova",
+ "key": "value"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
new file mode 100644
index 0000000000..b91781fae2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
@@ -0,0 +1,15 @@
+{
+ "aggregate": {
+ "availability_zone": "nova",
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova"
+ },
+ "name": "name",
+ "updated_at": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
new file mode 100644
index 0000000000..11dcf64373
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "interfaceAttachment": {
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
new file mode 100644
index 0000000000..9dff234366
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
@@ -0,0 +1,14 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
new file mode 100644
index 0000000000..192f9a6487
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "interfaceAttachments": [
+ {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
new file mode 100644
index 0000000000..9dff234366
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
@@ -0,0 +1,14 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl
new file mode 100644
index 0000000000..d1b610e944
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl
@@ -0,0 +1,69 @@
+{
+ "availabilityZoneInfo": [
+ {
+ "hosts": {
+ "consoleauth": {
+ "nova-consoleauth": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ },
+ "cert": {
+ "nova-cert": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ },
+ "conductor": {
+ "nova-conductor": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ },
+ "cells": {
+ "nova-cells": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ },
+ "scheduler": {
+ "nova-scheduler": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ },
+ "network": {
+ "nova-network": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ }
+ },
+ "zoneName": "internal",
+ "zoneState": {
+ "available": true
+ }
+ },
+ {
+ "hosts": {
+ "compute": {
+ "nova-compute": {
+ "active": true,
+ "available": true,
+ "updated_at": %(strtime_or_none)s
+ }
+ }
+ },
+ "zoneName": "nova",
+ "zoneState": {
+ "available": true
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl
new file mode 100644
index 0000000000..8190c5492f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "availabilityZoneInfo": [
+ {
+ "hosts": null,
+ "zoneName": "nova",
+ "zoneState": {
+ "available": true
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
new file mode 100644
index 0000000000..dcc1142f47
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "availability_zone": "nova",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl
new file mode 100644
index 0000000000..5e067dd3aa
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl
@@ -0,0 +1,26 @@
+{
+ "cell": {
+ "capacities": {
+ "disk_free": {
+ "total_mb": 1052672,
+ "units_by_mb": {
+ "0": 0,
+ "163840": 5,
+ "20480": 46,
+ "40960": 23,
+ "81920": 11
+ }
+ },
+ "ram_free": {
+ "total_mb": 7680,
+ "units_by_mb": {
+ "16384": 0,
+ "2048": 3,
+ "4096": 1,
+ "512": 13,
+ "8192": 0
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl
new file mode 100644
index 0000000000..62eb8ec31d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "cell": {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl
new file mode 100644
index 0000000000..5325a4e855
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "cells": []
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl
new file mode 100644
index 0000000000..97ea4c6dd3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "cells": [
+ {
+ "name": "cell1",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username1"
+ },
+ {
+ "name": "cell3",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username3"
+ },
+ {
+ "name": "cell5",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "child",
+ "username": "username5"
+ },
+ {
+ "name": "cell2",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username2"
+ },
+ {
+ "name": "cell4",
+ "rpc_host": null,
+ "rpc_port": null,
+ "type": "parent",
+ "username": "username4"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl
new file mode 100644
index 0000000000..35c063c820
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "certificate": {
+ "data": "%(text)s",
+ "private_key": "%(text)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl
new file mode 100644
index 0000000000..4938e92fba
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "certificate": {
+ "data": "%(text)s",
+ "private_key": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
new file mode 100644
index 0000000000..c8fc75995a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "cloudpipe": {
+ "project_id": "%(project_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
new file mode 100644
index 0000000000..6aa2ff60e2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "instance_id": "%(id)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
new file mode 100644
index 0000000000..698008802e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "cloudpipes": [
+ {
+ "created_at": "%(isotime)s",
+ "instance_id": "%(uuid)s",
+ "internal_ip": "%(ip)s",
+ "project_id": "%(project_id)s",
+ "public_ip": "%(ip)s",
+ "public_port": 22,
+ "state": "down"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
new file mode 100644
index 0000000000..0ab9141aea
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "configure_project": {
+ "vpn_ip": "%(vpn_ip)s",
+ "vpn_port": "%(vpn_port)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
new file mode 100644
index 0000000000..a9e9bc6564
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "config_drive": "%(cdrive)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
new file mode 100644
index 0000000000..21ed41cf7d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "config_drive": "%(cdrive)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
new file mode 100644
index 0000000000..f5be11801e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "console": {
+ "instance_uuid": "%(id)s",
+ "host": "%(host)s",
+ "port": %(port)s,
+ "internal_access_path": "%(internal_access_path)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
new file mode 100644
index 0000000000..00956b90e4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getRDPConsole": {
+ "type": "rdp-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl
new file mode 100644
index 0000000000..caeb2a5502
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getConsoleOutput": {
+ "length": 50
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl
new file mode 100644
index 0000000000..27ffe7d4c2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE"
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl
new file mode 100644
index 0000000000..60f5e1d9fe
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "createBackup": {
+ "name": "Backup 1",
+ "backup_type": "daily",
+ "rotation": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
new file mode 100644
index 0000000000..27557a3e9f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
new file mode 100644
index 0000000000..d3562d390d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "forceDelete": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl
new file mode 100644
index 0000000000..d38291fe08
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "restore": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl
new file mode 100644
index 0000000000..b79c7c857e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl
new file mode 100644
index 0000000000..f74aeb7c85
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl
@@ -0,0 +1,214 @@
+{
+ "images": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-DCF:diskConfig": "MANUAL",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(uuid)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "%(id)s",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/images/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/images/%(id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/images/%(id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl
new file mode 100644
index 0000000000..8a08b3e011
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "key_name": null,
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl
new file mode 100644
index 0000000000..3d98b99bcb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rebuild": {
+ "imageRef" : "%(host)s/images/%(image_id)s",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..ebb5f3d8a0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl
new file mode 100644
index 0000000000..f60c4af52f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "key_name": null,
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl
new file mode 100644
index 0000000000..c012c48318
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server" : {
+ "OS-DCF:diskConfig": "AUTO",
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl
new file mode 100644
index 0000000000..91aa3432ea
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl
new file mode 100644
index 0000000000..a290485e1c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "resize": {
+ "flavorRef": "3",
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl
new file mode 100644
index 0000000000..4ac22820df
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl
new file mode 100644
index 0000000000..26cf74e80a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..5e2c2e6ef0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..179cddce73
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "evacuate": {
+ "host": "%(host)s",
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
new file mode 100644
index 0000000000..d6436738ef
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..c81acaf935
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
new file mode 100644
index 0000000000..a4c68a3e8b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "server": {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..8fc7dbcc4e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
@@ -0,0 +1,60 @@
+{
+ "servers": [
+ {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
new file mode 100644
index 0000000000..a0b73e41d2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
@@ -0,0 +1,60 @@
+{
+ "server": {
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:locked_by": null,
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
+
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..b8e17cd8cf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
@@ -0,0 +1,60 @@
+{
+ "servers": [
+ {
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:locked_by": null,
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl
new file mode 100644
index 0000000000..683f67c98b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "attach": {
+ "volume_id": "%(volume_id)s",
+ "device": "%(device)s",
+ "disk_bus": "%(disk_bus)s",
+ "device_type": "%(device_type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl
new file mode 100644
index 0000000000..c56f3c3b83
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "detach": {
+ "volume_id": "%(volume_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
new file mode 100644
index 0000000000..1a28dd80ef
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
@@ -0,0 +1,59 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1"},
+ {"id": "volume_id2"}
+ ],
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..bb8a9bb9cb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
@@ -0,0 +1,60 @@
+{
+ "servers": [
+ {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1"},
+ {"id": "volume_id2"}
+ ],
+ "key_name": null
+ }]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl
new file mode 100644
index 0000000000..07a3268421
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "swap_volume_attachment": {
+ "old_volume_id": "%(old_volume_id)s",
+ "new_volume_id": "%(new_volume_id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
new file mode 100644
index 0000000000..85ae4890ad
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reserve": "%(reserve)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
new file mode 100644
index 0000000000..a3d11475bf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "cidr": "%(cidr)s",
+ "hostname": "%(hostname)s",
+ "host": "%(host)s",
+ "address": "%(address)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
new file mode 100644
index 0000000000..75d8f5aacd
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 1
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
new file mode 100644
index 0000000000..1f7ea26cb0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
@@ -0,0 +1,114 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "os-flavor-access:is_public": true,
+ "ram": 512,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "os-flavor-access:is_public": true,
+ "ram": 2048,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 1
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "os-flavor-access:is_public": true,
+ "ram": 4096,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 2
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "os-flavor-access:is_public": true,
+ "ram": 8192,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 4
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "os-flavor-access:is_public": true,
+ "ram": 16384,
+ "rxtx_factor": 1.0,
+ "swap": "",
+ "vcpus": 8
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
new file mode 100644
index 0000000000..70d0a57de8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "flavor": {
+ "name": "%(flavor_name)s",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "rxtx_factor": 2.0
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
new file mode 100644
index 0000000000..d3b4c231b2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "flavor": {
+ "disk": 10,
+ "id": "%(flavor_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/flavors/%(flavor_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/flavors/%(flavor_id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "%(flavor_name)s",
+ "os-flavor-access:is_public": true,
+ "ram": 1024,
+ "rxtx_factor": 2.0,
+ "vcpus": 2,
+ "OS-FLV-DISABLED:disabled": false,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "swap": ""
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
new file mode 100644
index 0000000000..000c5e1231
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "dns_entry": {
+ "ip": "%(ip)s",
+ "dns_type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
new file mode 100644
index 0000000000..3ec0743ba7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
new file mode 100644
index 0000000000..f6685f5d3f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "domain_entry": {
+ "scope": "%(scope)s",
+ "project": "%(project)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
new file mode 100644
index 0000000000..a14d395d23
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
new file mode 100644
index 0000000000..8edd0603f7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
new file mode 100644
index 0000000000..831cda7b55
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
new file mode 100644
index 0000000000..a6055cfecc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
new file mode 100644
index 0000000000..607109d70d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "floating_ip_pools": [
+ {
+ "name": "%(pool1)s"
+ },
+ {
+ "name": "%(pool2)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
new file mode 100644
index 0000000000..2f16cf07cb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "floating_ips_bulk_create": {
+ "ip_range": "%(ip_range)s",
+ "pool": "%(pool)s",
+ "interface": "%(interface)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
new file mode 100644
index 0000000000..ef1cbfb17f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "floating_ips_bulk_create": {
+ "interface": "eth0",
+ "ip_range": "192.168.1.0/24",
+ "pool": "nova"
+ }
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
new file mode 100644
index 0000000000..d630d669cd
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "ip_range": "%(ip_range)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
new file mode 100644
index 0000000000..166984b24a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "floating_ips_bulk_delete": "192.168.1.0/24"
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
new file mode 100644
index 0000000000..0eaaf75ae0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
new file mode 100644
index 0000000000..de1e622bb1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
@@ -0,0 +1,25 @@
+{
+ "floating_ip_info": [
+ {
+ "address": "10.10.10.1",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.2",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ },
+ {
+ "address": "10.10.10.3",
+ "instance_uuid": null,
+ "interface": "eth0",
+ "pool": "nova",
+ "project_id": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl
new file mode 100644
index 0000000000..f3b222c399
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "server": {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl
new file mode 100644
index 0000000000..b33e80668b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "servers": [
+ {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl
new file mode 100644
index 0000000000..3271a58a7d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef": "%(host)s/openstack/flavors/1",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
new file mode 100644
index 0000000000..3a69fcd321
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
@@ -0,0 +1,47 @@
+{
+ "server": {
+ "addresses": {
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..353d29f480
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
@@ -0,0 +1,48 @@
+{
+ "servers": [
+ {
+ "addresses": {},
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8797266b68
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl
new file mode 100644
index 0000000000..4ed89a182d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "reboot"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl
new file mode 100644
index 0000000000..efb234b436
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "host": [
+ {
+ "resource": {
+ "cpu": 1,
+ "disk_gb": 1028,
+ "host": "%(host_name)s",
+ "memory_mb": 8192,
+ "project": "(total)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 512,
+ "project": "(used_now)"
+ }
+ },
+ {
+ "resource": {
+ "cpu": 0,
+ "disk_gb": 0,
+ "host": "%(host_name)s",
+ "memory_mb": 0,
+ "project": "(used_max)"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl
new file mode 100644
index 0000000000..c0df4481a2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "shutdown"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl
new file mode 100644
index 0000000000..90f5ac7bcb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host_name)s",
+ "power_action": "startup"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl
new file mode 100644
index 0000000000..c1da8f4f9d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "status": "enable",
+ "maintenance_mode": "disable"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
new file mode 100644
index 0000000000..92f73892b3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "host": "%(host_name)s",
+ "maintenance_mode": "off_maintenance",
+ "status": "enabled"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl
new file mode 100644
index 0000000000..846988bd80
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "hosts": [
+ {
+ "host_name": "%(host_name)s",
+ "service": "compute",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl
new file mode 100644
index 0000000000..cd5bfdf999
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl
@@ -0,0 +1,39 @@
+{
+ "hosts": [
+ {
+ "host_name": "%(host_name)s",
+ "service": "conductor",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "compute",
+ "zone": "nova"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cert",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "consoleauth",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "network",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "scheduler",
+ "zone": "internal"
+ },
+ {
+ "host_name": "%(host_name)s",
+ "service": "cells",
+ "zone": "internal"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
new file mode 100644
index 0000000000..2777eb4887
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
@@ -0,0 +1,30 @@
+{
+ "hypervisors": [
+ {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "state": "up",
+ "status": "enabled",
+ "disk_available_least": 0,
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
new file mode 100644
index 0000000000..710cdfcf9c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "state": "up",
+ "status": "enabled",
+ "id": 1
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
new file mode 100644
index 0000000000..375627499d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1,
+ "state": "up",
+ "status": "enabled"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
new file mode 100644
index 0000000000..857a1b2166
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1,
+ "state": "up",
+ "status": "enabled",
+ "servers": []
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
new file mode 100644
index 0000000000..f125da01af
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
@@ -0,0 +1,28 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "state": "up",
+ "status": "enabled",
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
new file mode 100644
index 0000000000..2cfb51e703
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "hypervisor_statistics": {
+ "count": 1,
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
new file mode 100644
index 0000000000..e2f6d2e47e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "hypervisor": {
+ "hypervisor_hostname": "fake-mini",
+ "id": %(hypervisor_id)s,
+ "state": "up",
+ "status": "enabled",
+ "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
new file mode 100644
index 0000000000..7cd5325239
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "%(action)s",
+ "instance_uuid": "%(instance_uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": "",
+ "events": [
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ },
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
new file mode 100644
index 0000000000..0fdc33916a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ },
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
new file mode 100644
index 0000000000..f259deefdb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "reboot",
+ "events": [
+ {
+ "event": "schedule",
+ "finish_time": "2012-12-05T01:02:00.000000",
+ "result": "Success",
+ "start_time": "2012-12-05T01:00:02.000000",
+ "traceback": ""
+ },
+ {
+ "event": "compute_create",
+ "finish_time": "2012-12-05T01:04:00.000000",
+ "result": "Success",
+ "start_time": "2012-12-05T01:03:00.000000",
+ "traceback": ""
+ }
+ ],
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "147",
+ "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8",
+ "start_time": "2012-12-05T00:00:00.000000",
+ "user_id": "789"
+ }
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl
new file mode 100644
index 0000000000..a1863f2f39
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "lock": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
new file mode 100644
index 0000000000..27557a3e9f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl
new file mode 100644
index 0000000000..9e905ca2b9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unlock": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl
new file mode 100644
index 0000000000..4800d4aa11
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl
@@ -0,0 +1,7 @@
+{
+ "os-migrateLive": {
+ "host": "%(hostname)s",
+ "block_migration": false,
+ "disk_over_commit": false
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl
new file mode 100644
index 0000000000..a9bf8c483e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "migrate": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl
new file mode 100644
index 0000000000..91775be775
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl
@@ -0,0 +1,32 @@
+{
+ "migrations": [
+ {
+ "created_at": "2012-10-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1234,
+ "instance_uuid": "instance_id_123",
+ "new_instance_type_id": 2,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "Done",
+ "updated_at": "2012-10-29T13:42:02.000000"
+ },
+ {
+ "created_at": "2013-10-22T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 5678,
+ "instance_uuid": "instance_id_456",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "Done",
+ "updated_at": "2013-10-22T13:42:02.000000"
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl
new file mode 100644
index 0000000000..b9744ab2c7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addFixedIp":{
+ "networkId": %(networkId)s
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl
new file mode 100644
index 0000000000..7367e1242c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeFixedIp":{
+ "address": "%(ip)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
new file mode 100644
index 0000000000..19ede54ec2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
@@ -0,0 +1,18 @@
+{
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef": "%(host)s/openstack/flavors/1",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "min_count": "%(min_count)s",
+ "max_count": "%(max_count)s",
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
new file mode 100644
index 0000000000..e2f949f09d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
@@ -0,0 +1,19 @@
+{
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef": "%(host)s/openstack/flavors/1",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "return_reservation_id": "True",
+ "min_count": "%(min_count)s",
+ "max_count": "%(max_count)s",
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
new file mode 100644
index 0000000000..22d2880feb
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reservation_id": "%(reservation_id)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl
new file mode 100644
index 0000000000..762e881751
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "associate_host": "%(host)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
new file mode 100644
index 0000000000..46f69b3e81
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_host": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
new file mode 100644
index 0000000000..63b6eb6839
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate_project": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl
new file mode 100644
index 0000000000..2e09d15a60
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl
new file mode 100644
index 0000000000..aca6770b3b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "id": "1"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl
new file mode 100644
index 0000000000..18515bd6c4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl
@@ -0,0 +1,12 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24",
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true,
+ "allowed_start": "10.20.105.10",
+ "allowed_end": "10.20.105.200"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl
new file mode 100644
index 0000000000..5cf155b13f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "network": {
+ "bridge": null,
+ "vpn_public_port": null,
+ "dhcp_start": "%(ip)s",
+ "bridge_interface": null,
+ "updated_at": null,
+ "id": "%(id)s",
+ "cidr_v6": null,
+ "deleted_at": null,
+ "gateway": "%(ip)s",
+ "rxtx_base": null,
+ "label": "new net 111",
+ "priority": null,
+ "project_id": null,
+ "vpn_private_address": null,
+ "deleted": null,
+ "vlan": null,
+ "broadcast": "%(ip)s",
+ "netmask": "%(ip)s",
+ "injected": null,
+ "cidr": "10.20.105.0/24",
+ "vpn_public_address": null,
+ "multi_host": null,
+ "dns2": null,
+ "created_at": null,
+ "host": null,
+ "gateway_v6": null,
+ "netmask_v6": null,
+ "dns1": null,
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl
new file mode 100644
index 0000000000..ac75fe7fb1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl
@@ -0,0 +1,37 @@
+{
+ "network":
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl
new file mode 100644
index 0000000000..2e09d15a60
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "disassociate": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl
new file mode 100644
index 0000000000..ccdd586a0f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl
@@ -0,0 +1,72 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": null,
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl
new file mode 100644
index 0000000000..2e7c1fad30
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "pause": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
new file mode 100644
index 0000000000..27557a3e9f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl
new file mode 100644
index 0000000000..ce5024f0c9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unpause": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
new file mode 100644
index 0000000000..f2bf2bc02c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
@@ -0,0 +1,42 @@
+{
+ "hypervisors": [
+ {
+ "cpu_info": "?",
+ "state": "up",
+ "status": "enabled",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "os-pci:pci_stats": [
+ {
+ "count": 5,
+ "extra_info": {
+ "key1": "value1",
+ "phys_function": "[[\"0x0000\", \"0x04\", \"0x00\", \"0x1\"]]"
+ },
+ "keya": "valuea",
+ "product_id": "1520",
+ "vendor_id": "8086"
+ }
+ ],
+ "running_vms": 0,
+ "service": {
+ "host": "043b3cacf6f34c90a7245151fc8ebcda",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
new file mode 100644
index 0000000000..3c0fc0abcd
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
@@ -0,0 +1,40 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "state": "up",
+ "status": "enabled",
+ "disk_available_least": 0,
+ "host_ip": "%(ip)s",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "os-pci:pci_stats": [
+ {
+ "count": 5,
+ "extra_info": {
+ "key1": "value1",
+ "phys_function": "[[\"0x0000\", \"0x04\", \"0x00\", \"0x1\"]]"
+ },
+ "keya": "valuea",
+ "product_id": "1520",
+ "vendor_id": "8086"
+ }
+ ],
+ "running_vms": 0,
+ "service": {
+ "host": "043b3cacf6f34c90a7245151fc8ebcda",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl
new file mode 100644
index 0000000000..61cb17c6b4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "pci_devices": [
+ {
+ "address": "0000:04:10.0",
+ "compute_node_id": 1,
+ "dev_id": "pci_0000_04_10_0",
+ "dev_type": "type-VF",
+ "extra_info": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+ "id": 1,
+ "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1",
+ "label": "label_8086_1520",
+ "product_id": "1520",
+ "status": "available",
+ "vendor_id": "8086"
+ },
+ {
+ "address": "0000:04:10.1",
+ "compute_node_id": 1,
+ "dev_id": "pci_0000_04_10_1",
+ "dev_type": "type-VF",
+ "extra_info": {
+ "key3": "value3",
+ "key4": "value4"
+ },
+ "id": 2,
+ "server_uuid": "d5b446a6-a1b4-4d01-b4f0-eac37b3a62fc",
+ "label": "label_8086_1520",
+ "product_id": "1520",
+ "status": "available",
+ "vendor_id": "8086"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl
new file mode 100644
index 0000000000..6268f316df
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl
@@ -0,0 +1,20 @@
+{
+ "pci_devices": [
+ {
+ "address": "0000:04:10.0",
+ "compute_node_id": 1,
+ "id": 1,
+ "product_id": "1520",
+ "status": "available",
+ "vendor_id": "8086"
+ },
+ {
+ "address": "0000:04:10.1",
+ "compute_node_id": 1,
+ "id": 2,
+ "product_id": "1520",
+ "status": "available",
+ "vendor_id": "8086"
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl
new file mode 100644
index 0000000000..9977769881
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "pci_device": {
+ "address": "0000:04:10.0",
+ "compute_node_id": 1,
+ "dev_id": "pci_0000_04_10_0",
+ "dev_type": "type-VF",
+ "extra_info": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+ "id": 1,
+ "server_uuid": "69ba1044-0766-4ec0-b60d-09595de034a1",
+ "label": "label_8086_1520",
+ "product_id": "1520",
+ "status": "available",
+ "vendor_id": "8086"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
new file mode 100644
index 0000000000..b94f013f28
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
@@ -0,0 +1,60 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-pci:pci_devices": [
+ {
+ "id": 1
+ }
+ ],
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
new file mode 100644
index 0000000000..27557a3e9f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..d152ae31ec
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
@@ -0,0 +1,62 @@
+{
+ "servers": [
+ {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-pci:pci_devices": [
+ {
+ "id": 1
+ }
+ ],
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
new file mode 100644
index 0000000000..f66f22cd2d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
new file mode 100644
index 0000000000..f66f22cd2d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl
new file mode 100644
index 0000000000..a58a179123
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "quota_set": {
+ "force": "True",
+ "instances": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl
new file mode 100644
index 0000000000..97c456d4d4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "fixed_ips": -1,
+ "floating_ips": 10,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 45,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl
new file mode 100644
index 0000000000..ff23ff6ae4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 45,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10,
+ "id": "fake_tenant"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..1f12caa045
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "quota_set": {
+ "security_groups": 45
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..f7c276e3f7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 45,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl
new file mode 100644
index 0000000000..f66f22cd2d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 10,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl
new file mode 100644
index 0000000000..b322b2a870
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "quota_set": {
+ "force": "True",
+ "instances": 9
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl
new file mode 100644
index 0000000000..a17757aafe
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl
@@ -0,0 +1,19 @@
+{
+ "quota_set": {
+ "cores": 20,
+ "floating_ips": 10,
+ "fixed_ips": -1,
+ "id": "fake_tenant",
+ "injected_file_content_bytes": 10240,
+ "injected_file_path_bytes": 255,
+ "injected_files": 5,
+ "instances": 9,
+ "key_pairs": 100,
+ "metadata_items": 128,
+ "ram": 51200,
+ "security_group_rules": 20,
+ "security_groups": 10,
+ "server_groups": 10,
+ "server_group_members": 10
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl
new file mode 100644
index 0000000000..00956b90e4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getRDPConsole": {
+ "type": "rdp-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl
new file mode 100644
index 0000000000..c3955d6ac0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "rdp-html5",
+ "url": "http://127.0.0.1:6083/?token=%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl
new file mode 100644
index 0000000000..1d754d6608
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getSerialConsole": {
+ "type": "serial"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl
new file mode 100644
index 0000000000..721ce2b2ea
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "serial",
+ "url": "ws://127.0.0.1:6083/?token=%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl
new file mode 100644
index 0000000000..d04f7c7ae9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getSPICEConsole": {
+ "type": "spice-html5"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl
new file mode 100644
index 0000000000..65b72a866f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "spice-html5",
+ "url": "http://127.0.0.1:6082/spice_auto.html?token=%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl
new file mode 100644
index 0000000000..1926119ced
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl
new file mode 100644
index 0000000000..2eeee7c543
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url": "http://127.0.0.1:6080/vnc_auto.html?token=%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
new file mode 100644
index 0000000000..02547e994f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
@@ -0,0 +1,54 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "status": "%(status)s",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
new file mode 100644
index 0000000000..cd6ded9be3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "%(status)s",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl
new file mode 100644
index 0000000000..8a4ad0d52a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl
@@ -0,0 +1,6 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass",
+ "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl
new file mode 100644
index 0000000000..f946b74f53
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "rescue": {
+ "adminPass": "%(password)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl
new file mode 100644
index 0000000000..cafc9b13a8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unrescue": null
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
new file mode 100644
index 0000000000..a381df7444
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1"
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
new file mode 100644
index 0000000000..8836d0eecc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "security_group_default_rule": {
+ "ip_protocol": "TCP",
+ "from_port": "80",
+ "to_port": "80",
+ "cidr": "10.10.10.0/24"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
new file mode 100644
index 0000000000..ae6c62bfd6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range":{
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
new file mode 100644
index 0000000000..c083640c3e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "security_group_default_rules": [
+ {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+ ]
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
new file mode 100644
index 0000000000..97b5259a18
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "id": 1,
+ "from_port": 80,
+ "to_port": 80,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ }
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
new file mode 100644
index 0000000000..19a6ed2cb8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addSecurityGroup": {
+ "name": "%(group_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
new file mode 100644
index 0000000000..3f54ab6856
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "security_group": {
+ "name": "%(group_name)s",
+ "description": "description"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
new file mode 100644
index 0000000000..7f550036b8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeSecurityGroup": {
+ "name": "%(group_name)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
new file mode 100644
index 0000000000..e51714e3ee
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "%(description)s",
+ "id": 1,
+ "name": "%(group_name)s",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
new file mode 100644
index 0000000000..0372512744
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "security_group": {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
new file mode 100644
index 0000000000..47ed3c1f22
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "security_groups": [{"name": "test"}],
+ "key_name": null
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
new file mode 100644
index 0000000000..6657700682
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
@@ -0,0 +1,11 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [{"name": "test"}]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
new file mode 100644
index 0000000000..c87c1ee064
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [{"name": "test"}]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
new file mode 100644
index 0000000000..1771f2dff1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_groups": [
+ {
+ "description": "default",
+ "id": 1,
+ "name": "default",
+ "rules": [],
+ "tenant_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..519292d50a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "security_groups": [{"name": "test"}],
+ "key_name": null
+ }]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
new file mode 100644
index 0000000000..1afedaee9c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "cpu0_time": 17300000000,
+ "memory": 524288,
+ "vda_errors": -1,
+ "vda_read": 262144,
+ "vda_read_req": 112,
+ "vda_write": 5778432,
+ "vda_write_req": 488,
+ "vnet1_rx": 2070139,
+ "vnet1_rx_drop": 0,
+ "vnet1_rx_errors": 0,
+ "vnet1_rx_packets": 26701,
+ "vnet1_tx": 140208,
+ "vnet1_tx_drop": 0,
+ "vnet1_tx_errors": 0,
+ "vnet1_tx_packets": 662
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl
new file mode 100644
index 0000000000..43c3b6b407
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "events": [
+ {
+ "name": "%(name)s",
+ "tag": "%(tag)s",
+ "status": "%(status)s",
+ "server_uuid": "%(uuid)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl
new file mode 100644
index 0000000000..aa11b62c83
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "events": [
+ {
+ "code": 200,
+ "name": "%(name)s",
+ "server_uuid": "%(uuid)s",
+ "status": "%(status)s",
+ "tag": "%(tag)s"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl
new file mode 100644
index 0000000000..ba72643b6d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "server_group": {
+ "id": "%(id)s",
+ "name": "%(name)s",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl
new file mode 100644
index 0000000000..f01d451dd2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "server_groups": [
+ {
+ "id": "%(id)s",
+ "name": "test",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl
new file mode 100644
index 0000000000..1cc2328320
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "server_group": {
+ "name": "%(name)s",
+ "policies": ["anti-affinity"]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl
new file mode 100644
index 0000000000..ee9c37e82c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server_group": {
+ "id": "%(id)s",
+ "name": "%(name)s",
+ "policies": ["anti-affinity"],
+ "members": [],
+ "metadata": {}
+ }
+}
+
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
new file mode 100644
index 0000000000..90e75947e5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..ae20daabf7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "created": "%(isotime)s",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "updated": "%(isotime)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "OS-SRV-USG:terminated_at": null,
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl
new file mode 100644
index 0000000000..f11cca3739
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "service":
+ {
+ "host": "%(host)s",
+ "binary": "%(binary)s",
+ "disabled_reason": "%(disabled_reason)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl
new file mode 100644
index 0000000000..442e2099f9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "service": {
+ "binary": "nova-compute",
+ "disabled_reason": "test2",
+ "host": "host1",
+ "status": "disabled"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl
new file mode 100644
index 0000000000..1323ef50f5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "service":
+ {
+ "host": "%(host)s",
+ "binary": "%(binary)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl
new file mode 100644
index 0000000000..d7fe948d01
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "service": {
+ "binary": "nova-compute",
+ "host": "host1",
+ "status": "disabled"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl
new file mode 100644
index 0000000000..1323ef50f5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "service":
+ {
+ "host": "%(host)s",
+ "binary": "%(binary)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl
new file mode 100644
index 0000000000..0a6b2668df
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "service": {
+ "binary": "nova-compute",
+ "host": "host1",
+ "status": "enabled"
+ }
+}
\ No newline at end of file
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl
new file mode 100644
index 0000000000..174b443d0b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl
@@ -0,0 +1,44 @@
+{
+ "services": [
+ {
+ "binary": "nova-scheduler",
+ "disabled_reason": "test1",
+ "host": "host1",
+ "id": 1,
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "disabled_reason": "test2",
+ "host": "host1",
+ "id": 2,
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ },
+ {
+ "binary": "nova-scheduler",
+ "disabled_reason": null,
+ "host": "host2",
+ "id": 3,
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "%(strtime)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "disabled_reason": "test4",
+ "host": "host2",
+ "id": 4,
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "%(strtime)s",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl
new file mode 100644
index 0000000000..5a19f85cff
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
new file mode 100644
index 0000000000..6f9336d3c0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
new file mode 100644
index 0000000000..f37083013d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
@@ -0,0 +1,27 @@
+{
+ "tenant_usage": {
+ "server_usages": [
+ {
+ "ended_at": null,
+ "flavor": "m1.tiny",
+ "hours": 1.0,
+ "instance_id": "%(uuid)s",
+ "local_gb": 1,
+ "memory_mb": 512,
+ "name": "new-server-test",
+ "started_at": "%(strtime)s",
+ "state": "active",
+ "tenant_id": "openstack",
+ "uptime": 3600,
+ "vcpus": 1
+ }
+ ],
+ "start": "%(strtime)s",
+ "stop": "%(strtime)s",
+ "tenant_id": "openstack",
+ "total_hours": 1.0,
+ "total_local_gb_usage": 1.0,
+ "total_memory_mb_usage": 512.0,
+ "total_vcpus_usage": 1.0
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
new file mode 100644
index 0000000000..25b5ff2b84
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
@@ -0,0 +1,13 @@
+{
+ "tenant_usages": [
+ {
+ "start": "%(strtime)s",
+ "stop": "%(strtime)s",
+ "tenant_id": "openstack",
+ "total_hours": 1.0,
+ "total_local_gb_usage": 1.0,
+ "total_memory_mb_usage": 512.0,
+ "total_vcpus_usage": 1.0
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl
new file mode 100644
index 0000000000..ff00d97a14
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl
@@ -0,0 +1,3 @@
+{
+ "resume": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl
new file mode 100644
index 0000000000..8c2206a063
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl
@@ -0,0 +1,3 @@
+{
+ "suspend": null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl
new file mode 100644
index 0000000000..757084d2f3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "%(id)s",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "%(id)s",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl
new file mode 100644
index 0000000000..fb1c2d3d06
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "network": {
+ "label": "public",
+ "cidr": "172.0.0.0/24",
+ "vlan_start": 1,
+ "num_networks": 1,
+ "network_size": 255
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl
new file mode 100644
index 0000000000..ff9e2273d3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "%(id)s",
+ "label": "public"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
new file mode 100644
index 0000000000..28309af04c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
@@ -0,0 +1,26 @@
+{
+ "limits": {
+ "absolute": {
+ "maxImageMeta": 128,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240,
+ "maxSecurityGroupRules": 20,
+ "maxSecurityGroups": 10,
+ "maxServerMeta": 128,
+ "maxTotalCores": 20,
+ "maxTotalFloatingIps": 10,
+ "maxTotalInstances": 10,
+ "maxTotalKeypairs": 100,
+ "maxTotalRAMSize": 51200,
+ "maxServerGroups": 10,
+ "maxServerGroupMembers": 10,
+ "totalCoresUsed": 0,
+ "totalInstancesUsed": 0,
+ "totalRAMUsed": 0,
+ "totalSecurityGroupsUsed": 0,
+ "totalFloatingIpsUsed": 0,
+ "totalServerGroupsUsed": 0
+ },
+ "rate": []
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
new file mode 100644
index 0000000000..37f0a75d0a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
@@ -0,0 +1,11 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "user_data" : "%(user_data)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
new file mode 100644
index 0000000000..82a63eda5f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "volumes": [
+ {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl
new file mode 100644
index 0000000000..84bfdd2a5b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "volume": {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl
new file mode 100644
index 0000000000..82a63eda5f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "volumes": [
+ {
+ "attachments": [
+ {
+ "device": "/",
+ "id": "%(uuid)s",
+ "serverId": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "availabilityZone": "zone1:host1",
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(volume_desc)s",
+ "displayName": "%(volume_name)s",
+ "id": "%(uuid)s",
+ "metadata": {},
+ "size": 100,
+ "snapshotId": null,
+ "status": "in-use",
+ "volumeType": "Backup"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl
new file mode 100644
index 0000000000..33e9a68944
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "volume": {
+ "availability_zone": "zone1:host1",
+ "display_name": "%(volume_name)s",
+ "display_description": "%(volume_desc)s",
+ "size": 100
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl
new file mode 100644
index 0000000000..d13ce20cc3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl
@@ -0,0 +1,21 @@
+{
+ "volume": {
+ "status": "in-use",
+ "displayDescription": "%(volume_desc)s",
+ "availabilityZone": "zone1:host1",
+ "displayName": "%(volume_name)s",
+ "attachments": [
+ { "device": "/",
+ "serverId": "%(uuid)s",
+ "id": "%(uuid)s",
+ "volumeId": "%(uuid)s"
+ }
+ ],
+ "volumeType": "Backup",
+ "snapshotId": null,
+ "metadata": {},
+ "id": "%(uuid)s",
+ "createdAt": "%(strtime)s",
+ "size": 100
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl
new file mode 100644
index 0000000000..3271a58a7d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef": "%(host)s/openstack/flavors/1",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl
new file mode 100644
index 0000000000..adfaaa381e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl
new file mode 100644
index 0000000000..a8d47ea031
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "snapshot": {
+ "display_name": "%(snapshot_name)s",
+ "display_description": "%(description)s",
+ "volume_id": "%(volume_id)s",
+ "force": false
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl
new file mode 100644
index 0000000000..6153e8140e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "snapshot": {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(description)s",
+ "displayName": "%(snapshot_name)s",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": "%(uuid)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl
new file mode 100644
index 0000000000..1b509d54f8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "snapshots": [
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 101,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "Default description",
+ "displayName": "Default name",
+ "id": 102,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl
new file mode 100644
index 0000000000..c65d073ad7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl
@@ -0,0 +1,31 @@
+{
+ "snapshots": [
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 100,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 101,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ },
+ {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(text)s",
+ "displayName": "%(text)s",
+ "id": 102,
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl
new file mode 100644
index 0000000000..a9ab6240d6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "snapshot": {
+ "createdAt": "%(strtime)s",
+ "displayDescription": "%(description)s",
+ "displayName": "%(snapshot_name)s",
+ "id": "100",
+ "size": 100,
+ "status": "available",
+ "volumeId": 12
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl
new file mode 100644
index 0000000000..404649a36e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl
new file mode 100644
index 0000000000..322ff19802
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl
new file mode 100644
index 0000000000..3812a26c88
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "metadata": {
+ "foo": "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl
new file mode 100644
index 0000000000..3812a26c88
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "metadata": {
+ "foo": "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl
new file mode 100644
index 0000000000..85d69ec956
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "foo": "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl
new file mode 100644
index 0000000000..85d69ec956
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "meta": {
+ "foo": "%(value)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl
new file mode 100644
index 0000000000..432f6126e9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "confirmResize" : null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl
new file mode 100644
index 0000000000..19c2c489a4
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl
@@ -0,0 +1,9 @@
+{
+ "createImage" : {
+ "name" : "%(name)s",
+ "metadata": {
+ "meta_var": "meta_val"
+ }
+ }
+}
+
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl
new file mode 100644
index 0000000000..18eda9b9ab
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl
@@ -0,0 +1,5 @@
+{
+ "reboot" : {
+ "type" : "%(type)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
new file mode 100644
index 0000000000..3c44eb8d7e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "%(name)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
new file mode 100644
index 0000000000..8f38088c19
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
@@ -0,0 +1,17 @@
+{
+ "rebuild" : {
+ "imageRef" : "%(glance_host)s/images/%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "preserve_ephemeral": %(preserve_ephemeral)s
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..3c44eb8d7e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "%(name)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..6385f10593
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
@@ -0,0 +1,16 @@
+{
+ "rebuild" : {
+ "imageRef" : "%(glance_host)s/images/%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl
new file mode 100644
index 0000000000..468a88da24
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl
@@ -0,0 +1,5 @@
+{
+ "resize" : {
+ "flavorRef" : "%(id)s"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl
new file mode 100644
index 0000000000..2ddf6e5ab0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl
@@ -0,0 +1,3 @@
+{
+ "revertResize" : null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl
new file mode 100644
index 0000000000..883d0247a2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s" : null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl
new file mode 100644
index 0000000000..883d0247a2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl
@@ -0,0 +1,3 @@
+{
+ "%(action)s" : null
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl
new file mode 100644
index 0000000000..3c7cc62999
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl
new file mode 100644
index 0000000000..ab0a3bb797
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl
@@ -0,0 +1,10 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl
new file mode 100644
index 0000000000..71654b4b8a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..1d4f8d9031
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8797266b68
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v3/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/unit/integrated/v3/test_access_ips.py b/nova/tests/unit/integrated/v3/test_access_ips.py
new file mode 100644
index 0000000000..d0af25f281
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_access_ips.py
@@ -0,0 +1,93 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-access-ips'
+
+ def _servers_post(self, subs):
+ response = self._do_post('servers', 'server-post-req', subs)
+ subs.update(self._get_regexes())
+ return self._verify_response('server-post-resp', subs, response, 202)
+
+ def test_servers_post(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ self._servers_post(subs)
+
+ def test_servers_get(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ response = self._do_get('servers/%s' % uuid)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_servers_details(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ response = self._do_get('servers/detail')
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('servers-details-resp', subs, response, 200)
+
+ def test_servers_rebuild(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ subs['access_ip_v4'] = "4.3.2.1"
+ subs['access_ip_v6'] = '80fe::'
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild', subs)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-action-rebuild-resp',
+ subs, response, 202)
+
+ def test_servers_update(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ subs['access_ip_v4'] = "4.3.2.1"
+ subs['access_ip_v6'] = '80fe::'
+ response = self._do_put('servers/%s' % uuid, 'server-put-req', subs)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-put-resp', subs, response, 200)
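
Note on the subs dict pattern used in these tests: values such as subs['hostid'] = '[a-f0-9]+' look like regular expressions because the verification step appears to match template values against the response as patterns rather than literals (the comparison code itself is in the base class, outside this diff). A tiny illustration of that kind of match:

    import re

    template_value = '[a-f0-9]+'    # what the test stores in subs['hostid']
    actual_value = '3a5e7f0c9b1d'   # a plausible hostId from the response

    # Anchor the pattern so the whole field must match, not just a prefix.
    assert re.match('^%s$' % template_value, actual_value)
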
diff --git a/nova/tests/unit/integrated/v3/test_admin_actions.py b/nova/tests/unit/integrated/v3/test_admin_actions.py
new file mode 100644
index 0000000000..7530066438
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_admin_actions.py
@@ -0,0 +1,46 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-admin-actions"
+
+ def setUp(self):
+ """setUp Method for AdminActions api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(AdminActionsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_reset_network(self):
+ # Get api sample for the reset server network request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-network', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_inject_network_info(self):
+ # Get api sample for the inject network info request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-inject-network-info', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_reset_state(self):
+ # Get api sample for the server reset state request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-server-state', {})
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_admin_password.py b/nova/tests/unit/integrated/v3/test_admin_password.py
new file mode 100644
index 0000000000..2062e857df
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_admin_password.py
@@ -0,0 +1,29 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AdminPasswordJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-admin-password'
+
+ def test_server_password(self):
+ uuid = self._post_server()
+ subs = {"password": "foo"}
+ response = self._do_post('servers/%s/action' % uuid,
+ 'admin-password-change-password',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_agents.py b/nova/tests/unit/integrated/v3/test_agents.py
new file mode 100644
index 0000000000..ade59e6ec0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_agents.py
@@ -0,0 +1,98 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.db.sqlalchemy import models
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-agents"
+
+ def setUp(self):
+ super(AgentsJsonTest, self).setUp()
+
+ fake_agents_list = [{'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'id': 1}]
+
+ def fake_agent_build_create(context, values):
+ values['id'] = 1
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ return agent_build_ref
+
+ def fake_agent_build_get_all(context, hypervisor):
+ agent_build_all = []
+ for agent in fake_agents_list:
+ if hypervisor and hypervisor != agent['hypervisor']:
+ continue
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(agent)
+ agent_build_all.append(agent_build_ref)
+ return agent_build_all
+
+ def fake_agent_build_update(context, agent_build_id, values):
+ pass
+
+ def fake_agent_build_destroy(context, agent_update_id):
+ pass
+
+ self.stubs.Set(db, "agent_build_create",
+ fake_agent_build_create)
+ self.stubs.Set(db, "agent_build_get_all",
+ fake_agent_build_get_all)
+ self.stubs.Set(db, "agent_build_update",
+ fake_agent_build_update)
+ self.stubs.Set(db, "agent_build_destroy",
+ fake_agent_build_destroy)
+
+ def test_agent_create(self):
+ # Creates a new agent build.
+ project = {'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'
+ }
+ response = self._do_post('os-agents', 'agent-post-req',
+ project)
+ self._verify_response('agent-post-resp', project, response, 200)
+
+ def test_agent_list(self):
+ # Return a list of all agent builds.
+ response = self._do_get('os-agents')
+ self._verify_response('agents-get-resp', {}, response, 200)
+
+ def test_agent_update(self):
+ # Update an existing agent build.
+ agent_id = 1
+ subs = {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}
+ response = self._do_put('os-agents/%s' % agent_id,
+ 'agent-update-put-req', subs)
+ self._verify_response('agent-update-put-resp', subs, response, 200)
+
+ def test_agent_delete(self):
+ # Deletes an existing agent build.
+ agent_id = 1
+ response = self._do_delete('os-agents/%s' % agent_id)
+ self.assertEqual(response.status_code, 200)
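
The setUp above replaces the db-layer agent_build_* calls with in-memory fakes via self.stubs.Set. Conceptually this is attribute monkeypatching with restore at teardown; a rough, hedged equivalent without the stubout helper would look like this (illustration only, not how the test base class actually does it):

    from nova import db

    def fake_agent_build_create(context, values):
        values['id'] = 1
        return values

    # Swap the attribute in, run the code under test, then restore it.
    original = db.agent_build_create
    db.agent_build_create = fake_agent_build_create
    try:
        result = db.agent_build_create(None, {'os': 'os', 'version': '8.0'})
        assert result['id'] == 1
    finally:
        db.agent_build_create = original
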
diff --git a/nova/tests/unit/integrated/v3/test_aggregates.py b/nova/tests/unit/integrated/v3/test_aggregates.py
new file mode 100644
index 0000000000..6e29ea0403
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_aggregates.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AggregatesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-aggregates"
+
+ def test_aggregate_create(self):
+ subs = {
+ "aggregate_id": '(?P<id>\d+)'
+ }
+ response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
+ subs.update(self._get_regexes())
+ return self._verify_response('aggregate-post-resp',
+ subs, response, 200)
+
+ def test_list_aggregates(self):
+ self.test_aggregate_create()
+ response = self._do_get('os-aggregates')
+ subs = self._get_regexes()
+ self._verify_response('aggregates-list-get-resp', subs, response, 200)
+
+ def test_aggregate_get(self):
+ agg_id = self.test_aggregate_create()
+ response = self._do_get('os-aggregates/%s' % agg_id)
+ subs = self._get_regexes()
+ self._verify_response('aggregates-get-resp', subs, response, 200)
+
+ def test_add_metadata(self):
+ agg_id = self.test_aggregate_create()
+ response = self._do_post('os-aggregates/%s/action' % agg_id,
+ 'aggregate-metadata-post-req',
+ {'action': 'set_metadata'})
+ subs = self._get_regexes()
+ self._verify_response('aggregates-metadata-post-resp', subs,
+ response, 200)
+
+ def test_add_host(self):
+ aggregate_id = self.test_aggregate_create()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/%s/action' % aggregate_id,
+ 'aggregate-add-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-add-host-post-resp', subs,
+ response, 200)
+
+ def test_remove_host(self):
+ self.test_add_host()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/1/action',
+ 'aggregate-remove-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-remove-host-post-resp',
+ subs, response, 200)
+
+ def test_update_aggregate(self):
+ aggregate_id = self.test_aggregate_create()
+ response = self._do_put('os-aggregates/%s' % aggregate_id,
+ 'aggregate-update-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('aggregate-update-post-resp',
+ subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_attach_interfaces.py b/nova/tests/unit/integrated/v3/test_attach_interfaces.py
new file mode 100644
index 0000000000..f35edcb740
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_attach_interfaces.py
@@ -0,0 +1,166 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova.network import api as network_api
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AttachInterfacesSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-attach-interfaces'
+
+ def setUp(self):
+ super(AttachInterfacesSampleJsonTest, self).setUp()
+
+ def fake_list_ports(self, *args, **kwargs):
+ uuid = kwargs.get('device_id', None)
+ if not uuid:
+ raise exception.InstanceNotFound(instance_id=None)
+ port_data = {
+ "id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": uuid,
+ }
+ ports = {'ports': [port_data]}
+ return ports
+
+ def fake_show_port(self, context, port_id=None):
+ if not port_id:
+ raise exception.PortNotFound(port_id=None)
+ port_data = {
+ "id": port_id,
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
+ }
+ port = {'port': port_data}
+ return port
+
+ def fake_attach_interface(self, context, instance,
+ network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ network_id = "fake_net_uuid"
+ if not port_id:
+ port_id = "fake_port_uuid"
+ vif = fake_network_cache_model.new_vif()
+ vif['id'] = port_id
+ vif['network']['id'] = network_id
+ vif['network']['subnets'][0]['ips'][0] = requested_ip
+ return vif
+
+ def fake_detach_interface(self, context, instance, port_id):
+ pass
+
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['subnet_id'] = vanilla_regexes['uuid']
+ subs['net_id'] = vanilla_regexes['uuid']
+ subs['port_id'] = vanilla_regexes['uuid']
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ subs['ip_address'] = vanilla_regexes['ip']
+ return subs
+
+ def test_list_interfaces(self):
+ instance_uuid = self._post_server()
+ response = self._do_get('servers/%s/os-interface'
+ % instance_uuid)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-list-resp', subs,
+ response, 200)
+
+ def _stub_show_for_instance(self, instance_uuid, port_id):
+ show_port = network_api.API().show_port(None, port_id)
+ show_port['port']['device_id'] = instance_uuid
+ self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
+
+ def test_show_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ self._stub_show_for_instance(instance_uuid, port_id)
+ response = self._do_get('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': port_id,
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-show-resp', subs,
+ response, 200)
+
+ def test_create_interfaces(self, instance_uuid=None):
+ if instance_uuid is None:
+ instance_uuid = self._post_server()
+ subs = {
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'ip_address': '192.168.1.3',
+ 'port_state': 'ACTIVE',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ }
+ self._stub_show_for_instance(instance_uuid, subs['port_id'])
+ response = self._do_post('servers/%s/os-interface'
+ % instance_uuid,
+ 'attach-interfaces-create-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('attach-interfaces-create-resp', subs,
+ response, 200)
+
+ def test_delete_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ response = self._do_delete('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
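
A quick sanity check on the MAC pattern used in generalize_subs above: the regex is meant to cover addresses like the fa:16:3e:4c:2c:30 value returned by the fake neutron port.

    import re

    mac_pattern = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
    assert re.match('^%s$' % mac_pattern, 'fa:16:3e:4c:2c:30')
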
diff --git a/nova/tests/unit/integrated/v3/test_availability_zone.py b/nova/tests/unit/integrated/v3/test_availability_zone.py
new file mode 100644
index 0000000000..6f59e2c264
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_availability_zone.py
@@ -0,0 +1,49 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
+
+
+class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-availability-zone"
+
+ def _setup_services(self):
+ self.conductor = self.start_service('conductor',
+ host='conductor', manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute', host='compute')
+ self.cert = self.start_service('cert', host='cert')
+ self.consoleauth = self.start_service('consoleauth',
+ host='consoleauth')
+ self.network = self.start_service('network', host='network')
+ self.scheduler = self.start_service('scheduler', host='scheduler')
+ self.cells = self.start_service('cells', host='cells',
+ manager=CONF.cells.manager)
+
+ def test_availability_zone_list(self):
+ response = self._do_get('os-availability-zone')
+ self._verify_response('availability-zone-list-resp', {}, response, 200)
+
+ def test_availability_zone_detail(self):
+ response = self._do_get('os-availability-zone/detail')
+ subs = self._get_regexes()
+ self._verify_response('availability-zone-detail-resp', subs, response,
+ 200)
+
+ def test_availability_zone_post(self):
+ self._post_server()
diff --git a/nova/tests/unit/integrated/v3/test_cells.py b/nova/tests/unit/integrated/v3/test_cells.py
new file mode 100644
index 0000000000..2d7aea9542
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_cells.py
@@ -0,0 +1,107 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova.cells import state
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-cells"
+
+ def setUp(self):
+ # db_check_interval < 0 makes cells manager always hit the DB
+ self.flags(enable=True, db_check_interval=-1, group='cells')
+ super(CellsSampleJsonTest, self).setUp()
+ self._stub_cells()
+
+ def _stub_cells(self, num_cells=5):
+ self.cell_list = []
+ self.cells_next_id = 1
+
+ def _fake_cell_get_all(context):
+ return self.cell_list
+
+ def _fake_cell_get(inst, context, cell_name):
+ for cell in self.cell_list:
+ if cell['name'] == cell_name:
+ return cell
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ for x in xrange(num_cells):
+ cell = models.Cell()
+ our_id = self.cells_next_id
+ self.cells_next_id += 1
+ cell.update({'id': our_id,
+ 'name': 'cell%s' % our_id,
+ 'transport_url': 'rabbit://username%s@/' % our_id,
+ 'is_parent': our_id % 2 == 0})
+ self.cell_list.append(cell)
+
+ self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
+
+ def test_cells_empty_list(self):
+ # Override this
+ self._stub_cells(num_cells=0)
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-empty-resp', subs, response, 200)
+
+ def test_cells_list(self):
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-resp', subs, response, 200)
+
+ def test_cells_get(self):
+ response = self._do_get('os-cells/cell3')
+ subs = self._get_regexes()
+ self._verify_response('cells-get-resp', subs, response, 200)
+
+ def test_get_cell_capacity(self):
+ self._mock_cell_capacity()
+ state_manager = state.CellStateManager()
+ my_state = state_manager.get_my_state()
+ response = self._do_get('os-cells/%s/capacities' %
+ my_state.name)
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def test_get_all_cells_capacity(self):
+ self._mock_cell_capacity()
+ response = self._do_get('os-cells/capacities')
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def _mock_cell_capacity(self):
+ self.mox.StubOutWithMock(self.cells.manager.state_manager,
+ 'get_our_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.cells.manager.state_manager.get_our_capacities(). \
+ AndReturn(response)
+ self.mox.ReplayAll()
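
_mock_cell_capacity above follows the mox record/replay style: the expected call is recorded with AndReturn and only becomes active after ReplayAll. A self-contained sketch of that cycle, using a throwaway FakeStateManager class (assumes the mox library is installed; the class and method names are made up to mirror the test):

    import mox

    class FakeStateManager(object):
        def get_our_capacities(self):
            raise NotImplementedError()

    m = mox.Mox()
    manager = FakeStateManager()
    m.StubOutWithMock(manager, 'get_our_capacities')
    manager.get_our_capacities().AndReturn({'ram_free': {'total_mb': 7680}})  # record
    m.ReplayAll()                                                             # replay
    assert manager.get_our_capacities()['ram_free']['total_mb'] == 7680
    m.VerifyAll()
    m.UnsetStubs()
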
diff --git a/nova/tests/unit/integrated/v3/test_certificates.py b/nova/tests/unit/integrated/v3/test_certificates.py
new file mode 100644
index 0000000000..96cbbc711c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_certificates.py
@@ -0,0 +1,31 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class CertificatesSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-certificates"
+
+ def test_create_certificates(self):
+ response = self._do_post('os-certificates',
+ 'certificate-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('certificate-create-resp', subs, response, 200)
+
+ def test_get_root_certificate(self):
+ response = self._do_get('os-certificates/root')
+ subs = self._get_regexes()
+ self._verify_response('certificate-get-root-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_cloudpipe.py b/nova/tests/unit/integrated/v3/test_cloudpipe.py
new file mode 100644
index 0000000000..b8cb28d077
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_cloudpipe.py
@@ -0,0 +1,80 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid as uuid_lib
+
+from oslo.config import cfg
+
+from nova.cloudpipe import pipelib
+from nova.network import api as network_api
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+CONF = cfg.CONF
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+
+
+class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-cloudpipe"
+
+ def setUp(self):
+ super(CloudPipeSampleTest, self).setUp()
+
+ def get_user_data(self, project_id):
+ """Stub method to generate user data for cloudpipe tests."""
+ return "VVNFUiBEQVRB\n"
+
+ def network_api_get(self, context, network_uuid):
+ """Stub to get a valid network and its information."""
+ return {'vpn_public_address': '127.0.0.1',
+ 'vpn_public_port': 22}
+
+ self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
+ self.stubs.Set(network_api.API, "get",
+ network_api_get)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['project_id'] = '[0-9a-f-]+'
+ return subs
+
+ def test_cloud_pipe_create(self):
+ # Get api samples of cloud pipe extension creation.
+ self.flags(vpn_image_id=fake.get_valid_image_id())
+ project = {'project_id': str(uuid_lib.uuid4().hex)}
+ response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
+ project)
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-create-resp', subs, response, 200)
+ return project
+
+ def test_cloud_pipe_list(self):
+ # Get api sample for the cloud pipe extension list request.
+ project = self.test_cloud_pipe_create()
+ response = self._do_get('os-cloudpipe')
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-get-resp', subs, response, 200)
+
+ def test_cloud_pipe_update(self):
+ subs = {'vpn_ip': '192.168.1.1',
+ 'vpn_port': 2000}
+ response = self._do_put('os-cloudpipe/configure-project',
+ 'cloud-pipe-update-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
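
Aside on the get_user_data stub above: the canned return value is simply base64-encoded text, which keeps the sample deterministic.

    import base64

    # "VVNFUiBEQVRB" is the stubbed payload minus its trailing newline.
    assert base64.b64decode("VVNFUiBEQVRB") == b"USER DATA"
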
diff --git a/nova/tests/unit/integrated/v3/test_config_drive.py b/nova/tests/unit/integrated/v3/test_config_drive.py
new file mode 100644
index 0000000000..b8e7fc207a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_config_drive.py
@@ -0,0 +1,48 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConfigDriveSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-config-drive'
+
+ def setUp(self):
+ super(ConfigDriveSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+
+ def test_config_drive_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config_drive can be a string representing True or an empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('server-config-drive-get-resp', subs,
+ response, 200)
+
+ def test_config_drive_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config_drive can be a string representing True or an empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('servers-config-drive-details-resp',
+ subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_console_auth_tokens.py b/nova/tests/unit/integrated/v3/test_console_auth_tokens.py
new file mode 100644
index 0000000000..d286458678
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_console_auth_tokens.py
@@ -0,0 +1,51 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from oslo.serialization import jsonutils
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-console-auth-tokens"
+ extra_extensions_to_load = ["os-remote-consoles"]
+
+ def _get_console_url(self, data):
+ return jsonutils.loads(data)["console"]["url"]
+
+ def _get_console_token(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-rdp-console-post-req',
+ {'action': 'os-getRDPConsole'})
+
+ url = self._get_console_url(response.content)
+ return re.match('.+?token=([^&]+)', url).groups()[0]
+
+ def test_get_console_connect_info(self):
+ self.flags(enabled=True, group='rdp')
+
+ uuid = self._post_server()
+ token = self._get_console_token(uuid)
+
+ response = self._do_get('os-console-auth-tokens/%s' % token)
+
+ subs = self._get_regexes()
+ subs["uuid"] = uuid
+ subs["host"] = r"[\w\.\-]+"
+ subs["port"] = "[0-9]+"
+ subs["internal_access_path"] = ".*"
+ self._verify_response('get-console-connect-info-get-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_console_output.py b/nova/tests/unit/integrated/v3/test_console_output.py
new file mode 100644
index 0000000000..6ad9a1d9e6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_console_output.py
@@ -0,0 +1,27 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-console-output"
+
+ def test_get_console_output(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'console-output-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('console-output-post-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_consoles.py b/nova/tests/unit/integrated/v3/test_consoles.py
new file mode 100644
index 0000000000..7a889aa4cf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_consoles.py
@@ -0,0 +1,55 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsolesSamplesJsonTest(test_servers.ServersSampleBase):
+ sample_dir = "consoles"
+
+ def setUp(self):
+ super(ConsolesSamplesJsonTest, self).setUp()
+ self.flags(console_public_hostname='fake')
+ self.flags(console_host='fake')
+ self.flags(console_driver='nova.console.fake.FakeConsoleProxy')
+ self.console = self.start_service('console', host='fake')
+
+ def _create_consoles(self, server_uuid):
+ response = self._do_post('servers/%s/consoles' % server_uuid,
+ 'consoles-create-req', {})
+ self.assertEqual(response.status_code, 201)
+
+ def test_create_consoles(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+
+ def test_list_consoles(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_get('servers/%s/consoles' % uuid)
+ self._verify_response('consoles-list-get-resp', {}, response, 200)
+
+ def test_console_get(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_get('servers/%s/consoles/1' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('consoles-get-resp', subs, response, 200)
+
+ def test_console_delete(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_delete('servers/%s/consoles/1' % uuid)
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_create_backup.py b/nova/tests/unit/integrated/v3/test_create_backup.py
new file mode 100644
index 0000000000..089a61fb5f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_create_backup.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-create-backup"
+
+ def setUp(self):
+ """setUp Method for PauseServer api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(CreateBackupSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ @mock.patch.object(fake._FakeImageService, 'detail', return_value=[])
+ def test_post_backup_server(self, mock_method):
+ # Get api sample for the backup server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'create-backup-req', {})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_deferred_delete.py b/nova/tests/unit/integrated/v3/test_deferred_delete.py
new file mode 100644
index 0000000000..0b8d970900
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_deferred_delete.py
@@ -0,0 +1,42 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-deferred-delete"
+
+ def setUp(self):
+ super(DeferredDeleteSampleJsonTests, self).setUp()
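+ # A non-zero reclaim_instance_interval makes DELETE soft-delete the
+ # server, so the restore and forceDelete actions below can operate on it.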
+ self.flags(reclaim_instance_interval=1)
+
+ def test_restore(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'restore-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_force_delete(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'force-delete-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_disk_config.py b/nova/tests/unit/integrated/v3/test_disk_config.py
new file mode 100644
index 0000000000..97eeb31ace
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_disk_config.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class DiskConfigJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-disk-config'
+ extra_extensions_to_load = ["images"]
+
+ def test_list_servers_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('list-servers-detail-get', subs, response, 200)
+
+ def test_get_server(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-put-req', {})
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-update-put-resp', subs, response, 200)
+
+ def test_resize_server(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-resize-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ # NOTE(tmello): Resize does not return response body
+ # Bug #1085213.
+ self.assertEqual(response.content, "")
+
+ def test_rebuild_server(self):
+ uuid = self._post_server()
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild-req', subs)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-action-rebuild-resp',
+ subs, response, 202)
+
+ def test_get_image(self):
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_list_images(self):
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('image-list-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_evacuate.py b/nova/tests/unit/integrated/v3/test_evacuate.py
new file mode 100644
index 0000000000..1d63404b6d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_evacuate.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+from nova.servicegroup import api as service_group_api
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class EvacuateJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-evacuate"
+
+ def _test_evacuate(self, req_subs, server_req, server_resp,
+ expected_resp_code):
+ self.uuid = self._post_server()
+
+ def fake_service_is_up(self, service):
+ """Simulate validation of instance host is down."""
+ return False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ """Simulate that given host is a valid host."""
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ def fake_check_instance_exists(self, context, instance):
+ """Simulate validation of instance does not exist."""
+ return False
+
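+ # Stub the service and host lookups so the API sees the instance's
+ # host as down and the requested target as a valid compute host that
+ # does not already have the instance.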
+ self.stubs.Set(service_group_api.API, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.stubs.Set(compute_manager.ComputeManager,
+ '_check_instance_exists',
+ fake_check_instance_exists)
+
+ response = self._do_post('servers/%s/action' % self.uuid,
+ server_req, req_subs)
+ subs = self._get_regexes()
+ self._verify_response(server_resp, subs, response, expected_resp_code)
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ # NOTE(wingwj): The target host must differ from the instance's
+ # current host.
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ 'server-evacuate-resp', 200)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host='testHost')
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ 'server-evacuate-find-host-resp', 200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
diff --git a/nova/tests/unit/integrated/v3/test_extended_availability_zone.py b/nova/tests/unit/integrated/v3/test_extended_availability_zone.py
new file mode 100644
index 0000000000..accd4a2cdf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_availability_zone.py
@@ -0,0 +1,34 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedAvailabilityZoneJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-availability-zone"
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_server_attributes.py b/nova/tests/unit/integrated/v3/test_extended_server_attributes.py
new file mode 100644
index 0000000000..1a00f45237
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_server_attributes.py
@@ -0,0 +1,42 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-extended-server-attributes"
+
+ def test_show(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_status.py b/nova/tests/unit/integrated/v3/test_extended_status.py
new file mode 100644
index 0000000000..8f952eaacc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_status.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedStatusSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-status"
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_volumes.py b/nova/tests/unit/integrated/v3/test_extended_volumes.py
new file mode 100644
index 0000000000..f6500eaac6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_volumes.py
@@ -0,0 +1,151 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+from nova import context
+from nova import db
+from nova import objects
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.volume import cinder
+
+
+class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-volumes"
+
+ def _stub_compute_api_get_instance_bdms(self, server_id):
+
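+ # Return two fake volume BDMs for the server so the extended volumes
+ # attributes show up in the sample responses.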
+ def fake_bdms_get_all_by_instance(context, instance_uuid,
+ use_slave=False):
+ bdms = [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdc'})
+ ]
+ return bdms
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+
+ def _stub_compute_api_get(self):
+ def fake_compute_api_get(self, context, instance_id, **kwargs):
+ want_objects = kwargs.get('want_objects')
+ if want_objects:
+ return fake_instance.fake_instance_obj(
+ context, **{'uuid': instance_id})
+ else:
+ return {'uuid': instance_id}
+
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+
+ def test_show(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+ def test_attach_volume(self):
+ bdm = objects.BlockDeviceMapping()
+ device_name = '/dev/vdd'
+ bdm['device_name'] = device_name
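+ # Stub out the Cinder calls and the compute manager so the attach
+ # request is accepted without a real volume backend.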
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ self.stubs.Set(compute_manager.ComputeManager,
+ "reserve_block_device_name",
+ lambda *a, **k: bdm)
+ self.stubs.Set(compute_manager.ComputeManager,
+ 'attach_volume',
+ lambda *a, **k: None)
+
+ volume = fakes.stub_volume_get(None, context.get_admin_context(),
+ 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
+ subs = {
+ 'volume_id': volume['id'],
+ 'device': device_name,
+ 'disk_bus': 'ide',
+ 'device_type': 'cdrom'
+ }
+ server_id = self._post_server()
+ response = self._do_post('servers/%s/action'
+ % server_id,
+ 'attach-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_detach_volume(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_compute_api_get_instance_bdms(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
+ subs = {
+ 'volume_id': attach_id,
+ }
+ response = self._do_post('servers/%s/action'
+ % server_id, 'detach-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_swap_volume(self):
+ server_id = self._post_server()
+ old_volume_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ new_volume_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f805'
+ self._stub_compute_api_get_instance_bdms(server_id)
+
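+ # The old volume id reports as attached to this server, while any
+ # other id reports as detached, which is what the swap action expects.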
+ def stub_volume_get(self, context, volume_id):
+ if volume_id == old_volume_id:
+ return fakes.stub_volume(volume_id, instance_uuid=server_id)
+ else:
+ return fakes.stub_volume(volume_id, instance_uuid=None,
+ attach_status='detached')
+
+ self.stubs.Set(cinder.API, 'get', stub_volume_get)
+ self.stubs.Set(cinder.API, 'begin_detaching', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'check_detach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ self.stubs.Set(compute_manager.ComputeManager, 'swap_volume',
+ lambda *a, **k: None)
+ subs = {
+ 'old_volume_id': old_volume_id,
+ 'new_volume_id': new_volume_id
+ }
+ response = self._do_post('servers/%s/action' % server_id,
+ 'swap-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_extension_info.py b/nova/tests/unit/integrated/v3/test_extension_info.py
new file mode 100644
index 0000000000..c23339b96e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extension_info.py
@@ -0,0 +1,71 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import extensions as api_extensions
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ sample_dir = "extension-info"
+
+ def test_list_extensions(self):
+ response = self._do_get('extensions')
+ subs = self._get_regexes()
+ self._verify_response('extensions-list-resp', subs, response, 200)
+
+ def test_get_extensions(self):
+ response = self._do_get('extensions/flavors')
+ subs = self._get_regexes()
+ self._verify_response('extensions-get-resp', subs, response, 200)
+
+
+class ExtensionInfoFormatTest(api_sample_base.ApiSampleTestBaseV3):
+ # NOTE: To check the format of every extension, make authorize() always
+ # return True instead of relying on fake_policy.py, because most
+ # extensions are not marked "discoverable" in fake_policy.py.
+ all_extensions = True
+
+ def _test_list_extensions(self, key, pattern):
+ with mock.patch.object(api_extensions,
+ 'soft_extension_authorizer') as api_mock:
+ def fake_soft_extension_authorizer(api_name, extension_name):
+ def authorize(context, action=None):
+ return True
+ return authorize
+
+ api_mock.side_effect = fake_soft_extension_authorizer
+ response = self._do_get('extensions')
+ response = jsonutils.loads(response.content)
+ extensions = response['extensions']
+ pattern_comp = re.compile(pattern)
+ for ext in extensions:
+ self.assertIsNotNone(pattern_comp.match(ext[key]),
+ '%s does not match with %s' % (ext[key],
+ pattern))
+
+ def test_list_extensions_name_format(self):
+ # name should be CamelCase.
+ pattern = '^[A-Z]{1}[a-z]{1}[a-zA-Z]*$'
+ self._test_list_extensions('name', pattern)
+
+ def test_list_extensions_alias_format(self):
+ # alias should contain lowercase chars and '-' only.
+ pattern = '^[a-z-]+$'
+ self._test_list_extensions('alias', pattern)
diff --git a/nova/tests/unit/integrated/v3/test_fixed_ips.py b/nova/tests/unit/integrated/v3/test_fixed_ips.py
new file mode 100644
index 0000000000..cabeac018a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_fixed_ips.py
@@ -0,0 +1,109 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova import exception
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.tests.unit.objects import test_network
+from nova.tests.unit import utils as test_utils
+
+
+class FixedIpTest(test_servers.ServersSampleBase):
+ extension_name = "os-fixed-ips"
+
+ def setUp(self):
+ super(FixedIpTest, self).setUp()
+
+ instance = dict(test_utils.get_test_instance(),
+ hostname='openstack', host='host')
+ fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ ]
+
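+ # Back the fixed-IP DB API with the in-memory list defined above.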
+ def fake_fixed_ip_get_by_address(context, address,
+ columns_to_join=None):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ host = {'host': "host",
+ 'hostname': 'openstack'}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, network, host)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ def test_fixed_ip_reserve(self):
+ # Reserve a Fixed IP.
+ project = {'reserve': None}
+ response = self._do_post('os-fixed-ips/192.168.1.1/action',
+ 'fixedip-post-req',
+ project)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_get_fixed_ip(self):
+ # Return data about the given fixed ip.
+ response = self._do_get('os-fixed-ips/192.168.1.1')
+ project = {'cidr': '192.168.1.0/24',
+ 'hostname': 'openstack',
+ 'host': 'host',
+ 'address': '192.168.1.1'}
+ self._verify_response('fixedips-get-resp', project, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavor_access.py b/nova/tests/unit/integrated/v3/test_flavor_access.py
new file mode 100644
index 0000000000..66316856a3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_access.py
@@ -0,0 +1,89 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'flavor-access'
+
+ def _add_tenant(self):
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ 'flavor_id': 10,
+ }
+ response = self._do_post('flavors/10/action',
+ 'flavor-access-add-tenant-req',
+ subs)
+ self._verify_response('flavor-access-add-tenant-resp',
+ subs, response, 200)
+
+ def _create_flavor(self):
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': 'test_flavor'
+ }
+ response = self._do_post("flavors",
+ "flavor-access-create-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-access-create-resp", subs, response, 200)
+
+ def test_flavor_access_create(self):
+ self._create_flavor()
+
+ def test_flavor_access_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-access-detail-resp', subs, response, 200)
+
+ def test_flavor_access_list(self):
+ self._create_flavor()
+ self._add_tenant()
+ flavor_id = 10
+ response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'tenant_id': 'fake_tenant',
+ }
+ self._verify_response('flavor-access-list-resp', subs, response, 200)
+
+ def test_flavor_access_show(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-access-show-resp', subs, response, 200)
+
+ def test_flavor_access_add_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+
+ def test_flavor_access_remove_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ }
+ response = self._do_post('flavors/10/action',
+ "flavor-access-remove-tenant-req",
+ subs)
+ exp_subs = {
+ "tenant_id": self.api.project_id,
+ "flavor_id": "10"
+ }
+ self._verify_response('flavor-access-remove-tenant-resp',
+ exp_subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py b/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py
new file mode 100644
index 0000000000..ba823c7c24
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py
@@ -0,0 +1,62 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'flavor-extra-specs'
+
+ def _flavor_extra_specs_create(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ response = self._do_post('flavors/1/os-extra_specs',
+ 'flavor-extra-specs-create-req', subs)
+ self._verify_response('flavor-extra-specs-create-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_get(self):
+ subs = {'value1': 'value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs/key1')
+ self._verify_response('flavor-extra-specs-get-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_list(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs')
+ self._verify_response('flavor-extra-specs-list-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_create(self):
+ self._flavor_extra_specs_create()
+
+ def test_flavor_extra_specs_update(self):
+ subs = {'value1': 'new_value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_put('flavors/1/os-extra_specs/key1',
+ 'flavor-extra-specs-update-req', subs)
+ self._verify_response('flavor-extra-specs-update-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_delete(self):
+ self._flavor_extra_specs_create()
+ response = self._do_delete('flavors/1/os-extra_specs/key1')
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_flavor_manage.py b/nova/tests/unit/integrated/v3/test_flavor_manage.py
new file mode 100644
index 0000000000..e7911ef0a6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_manage.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'flavor-manage'
+
+ def _create_flavor(self):
+ """Create a flavor."""
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': "test_flavor"
+ }
+ response = self._do_post("flavors",
+ "flavor-create-post-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-create-post-resp", subs, response, 200)
+
+ def test_create_flavor(self):
+ # Get api sample to create a flavor.
+ self._create_flavor()
+
+ def test_delete_flavor(self):
+ # Get api sample to delete a flavor.
+ self._create_flavor()
+ response = self._do_delete("flavors/10")
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_flavor_rxtx.py b/nova/tests/unit/integrated/v3/test_flavor_rxtx.py
new file mode 100644
index 0000000000..2b0840259e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_rxtx.py
@@ -0,0 +1,46 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-flavor-rxtx'
+
+ def test_flavor_rxtx_get(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'flavor_name': 'm1.tiny'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-rxtx-get-resp', subs, response, 200)
+
+ def test_flavors_rxtx_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-rxtx-list-resp', subs, response, 200)
+
+ def test_flavors_rxtx_create(self):
+ subs = {
+ 'flavor_id': 100,
+ 'flavor_name': 'flavortest'
+ }
+ response = self._do_post('flavors',
+ 'flavor-rxtx-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-rxtx-post-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavors.py b/nova/tests/unit/integrated/v3/test_flavors.py
new file mode 100644
index 0000000000..e8db9bc5a1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavors.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ sample_dir = 'flavors'
+
+ def test_flavors_get(self):
+ response = self._do_get('flavors/1')
+ subs = self._get_regexes()
+ self._verify_response('flavor-get-resp', subs, response, 200)
+
+ def test_flavors_list(self):
+ response = self._do_get('flavors')
+ subs = self._get_regexes()
+ self._verify_response('flavors-list-resp', subs, response, 200)
+
+ def test_flavors_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavors-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ip_dns.py b/nova/tests/unit/integrated/v3/test_floating_ip_dns.py
new file mode 100644
index 0000000000..d0326b6535
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ip_dns.py
@@ -0,0 +1,91 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-floating-ip-dns"
+
+ domain = 'domain1.example.org'
+ name = 'instance1'
+ scope = 'public'
+ project = 'project1'
+ dns_type = 'A'
+ ip = '192.168.1.1'
+
+ def _create_or_update(self):
+ subs = {'project': self.project,
+ 'scope': self.scope}
+ response = self._do_put('os-floating-ip-dns/%s' % self.domain,
+ 'floating-ip-dns-create-or-update-req', subs)
+ subs.update({'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-resp', subs,
+ response, 200)
+
+ def _create_or_update_entry(self):
+ subs = {'ip': self.ip, 'dns_type': self.dns_type}
+ response = self._do_put('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name),
+ 'floating-ip-dns-create-or-update-entry-req',
+ subs)
+ subs.update({'name': self.name, 'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-entry-resp',
+ subs, response, 200)
+
+ def test_floating_ip_dns_list(self):
+ self._create_or_update()
+ response = self._do_get('os-floating-ip-dns')
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ self._verify_response('floating-ip-dns-list-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_create_or_update(self):
+ self._create_or_update()
+
+ def test_floating_ip_dns_delete(self):
+ self._create_or_update()
+ response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_create_or_update_entry(self):
+ self._create_or_update_entry()
+
+ def test_floating_ip_dns_entry_get(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-get-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_entry_delete(self):
+ self._create_or_update_entry()
+ response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_entry_list(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.ip))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-list-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ip_pools.py b/nova/tests/unit/integrated/v3/test_floating_ip_pools.py
new file mode 100644
index 0000000000..4db76801c7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ip_pools.py
@@ -0,0 +1,35 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import api as network_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FloatingIPPoolsSampleTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-floating-ip-pools"
+
+ def test_list_floatingippools(self):
+ pool_list = ["pool1", "pool2"]
+
+ def fake_get_floating_ip_pools(self, context):
+ return pool_list
+
+ self.stubs.Set(network_api.API, "get_floating_ip_pools",
+ fake_get_floating_ip_pools)
+ response = self._do_get('os-floating-ip-pools')
+ subs = {
+ 'pool1': pool_list[0],
+ 'pool2': pool_list[1]
+ }
+ self._verify_response('floatingippools-list-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py b/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py
new file mode 100644
index 0000000000..9459b1bf61
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py
@@ -0,0 +1,86 @@
+# Copyright 2014 IBM Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova import context
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
+CONF.import_opt('public_interface', 'nova.network.linux_net')
+
+
+class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-floating-ips-bulk"
+
+ def setUp(self):
+ super(FloatingIpsBulkTest, self).setUp()
+ pool = CONF.default_floating_pool
+ interface = CONF.public_interface
+
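+ # Seed the database with a small pool; the last entry sets a host so
+ # the list-by-host sample has data to return.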
+ self.ip_pool = [
+ {
+ 'address': "10.10.10.1",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.2",
+ 'pool': pool,
+ 'interface': interface
+ },
+ {
+ 'address': "10.10.10.3",
+ 'pool': pool,
+ 'interface': interface,
+ 'host': "testHost"
+ },
+ ]
+ self.compute.db.floating_ip_bulk_create(
+ context.get_admin_context(), self.ip_pool)
+
+ self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
+ context.get_admin_context(), self.ip_pool)
+
+ def test_floating_ips_bulk_list(self):
+ response = self._do_get('os-floating-ips-bulk')
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-list-resp',
+ subs, response, 200)
+
+ def test_floating_ips_bulk_list_by_host(self):
+ response = self._do_get('os-floating-ips-bulk/testHost')
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-list-by-host-resp',
+ subs, response, 200)
+
+ def test_floating_ips_bulk_create(self):
+ response = self._do_post('os-floating-ips-bulk',
+ 'floating-ips-bulk-create-req',
+ {"ip_range": "192.168.1.0/24",
+ "pool": CONF.default_floating_pool,
+ "interface": CONF.public_interface})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-create-resp', subs,
+ response, 200)
+
+ def test_floating_ips_bulk_delete(self):
+ response = self._do_put('os-floating-ips-bulk/delete',
+ 'floating-ips-bulk-delete-req',
+ {"ip_range": "192.168.1.0/24"})
+ subs = self._get_regexes()
+ self._verify_response('floating-ips-bulk-delete-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_fping.py b/nova/tests/unit/integrated/v3/test_fping.py
new file mode 100644
index 0000000000..000c6d2484
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_fping.py
@@ -0,0 +1,45 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.api.openstack.compute.plugins.v3 import fping
+from nova.tests.unit.api.openstack.compute.contrib import test_fping
+from nova.tests.unit.integrated.v3 import test_servers
+from nova import utils
+
+
+class FpingSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-fping"
+
+ def setUp(self):
+ super(FpingSampleJsonTests, self).setUp()
+
+ def fake_check_fping(self):
+ pass
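+ # Replace the real fping invocation and the controller's availability
+ # check with no-ops so the extension can run in the test environment.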
+ self.stubs.Set(utils, "execute", test_fping.execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ fake_check_fping)
+
+ def test_get_fping(self):
+ self._post_server()
+ response = self._do_get('os-fping')
+ subs = self._get_regexes()
+ self._verify_response('fping-get-resp', subs, response, 200)
+
+ def test_get_fping_details(self):
+ uuid = self._post_server()
+ response = self._do_get('os-fping/%s' % (uuid))
+ subs = self._get_regexes()
+ self._verify_response('fping-get-details-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_hide_server_addresses.py b/nova/tests/unit/integrated/v3/test_hide_server_addresses.py
new file mode 100644
index 0000000000..908fef62d1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hide_server_addresses.py
@@ -0,0 +1,39 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.compute import vm_states
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_hide_server_address_states',
+ 'nova.api.openstack.compute.plugins.v3.hide_server_addresses')
+
+
+class ServersSampleHideAddressesJsonTest(test_servers.ServersSampleJsonTest):
+ extension_name = 'os-hide-server-addresses'
+ # Override the sample dirname because test_servers.ServersSampleJsonTest
+ # sets its own, so it would not otherwise default to the extension name.
+ sample_dir = extension_name
+
+ def setUp(self):
+ # Override osapi_hide_server_address_states so that the JSON samples
+ # include an example of the addresses being hidden.
+ CONF.set_override("osapi_hide_server_address_states",
+ [vm_states.ACTIVE])
+ super(ServersSampleHideAddressesJsonTest, self).setUp()
diff --git a/nova/tests/unit/integrated/v3/test_hosts.py b/nova/tests/unit/integrated/v3/test_hosts.py
new file mode 100644
index 0000000000..7142ee885a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hosts.py
@@ -0,0 +1,57 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-hosts"
+
+ def test_host_startup(self):
+ response = self._do_get('os-hosts/%s/startup' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-startup', subs, response, 200)
+
+ def test_host_reboot(self):
+ response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-reboot', subs, response, 200)
+
+ def test_host_shutdown(self):
+ response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-shutdown', subs, response, 200)
+
+ def test_host_maintenance(self):
+ response = self._do_put('os-hosts/%s' % self.compute.host,
+ 'host-put-maintenance-req', {})
+ subs = self._get_regexes()
+ self._verify_response('host-put-maintenance-resp', subs, response, 200)
+
+ def test_host_get(self):
+ response = self._do_get('os-hosts/%s' % self.compute.host)
+ subs = self._get_regexes()
+ self._verify_response('host-get-resp', subs, response, 200)
+
+ def test_hosts_list(self):
+ response = self._do_get('os-hosts')
+ subs = self._get_regexes()
+ self._verify_response('hosts-list-resp', subs, response, 200)
+
+ def test_hosts_list_compute_service(self):
+ response = self._do_get('os-hosts?service=compute')
+ subs = self._get_regexes()
+ self._verify_response('hosts-list-compute-service-resp',
+ subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_hypervisors.py b/nova/tests/unit/integrated/v3/test_hypervisors.py
new file mode 100644
index 0000000000..f36f35ec84
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hypervisors.py
@@ -0,0 +1,69 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-hypervisors"
+
+ def test_hypervisors_list(self):
+ response = self._do_get('os-hypervisors')
+ self._verify_response('hypervisors-list-resp', {}, response, 200)
+
+ def test_hypervisors_search(self):
+ response = self._do_get('os-hypervisors/fake/search')
+ self._verify_response('hypervisors-search-resp', {}, response, 200)
+
+ def test_hypervisors_servers(self):
+ response = self._do_get('os-hypervisors/fake/servers')
+ self._verify_response('hypervisors-servers-resp', {}, response, 200)
+
+ def test_hypervisors_detail(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/detail')
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-detail-resp', subs, response, 200)
+
+ def test_hypervisors_show(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-resp', subs, response, 200)
+
+ def test_hypervisors_statistics(self):
+ response = self._do_get('os-hypervisors/statistics')
+ self._verify_response('hypervisors-statistics-resp', {}, response, 200)
+
+ def test_hypervisors_uptime(self):
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ self.stubs.Set(compute_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = 1
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ subs = {
+ 'hypervisor_id': hypervisor_id,
+ }
+ self._verify_response('hypervisors-uptime-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_image_size.py b/nova/tests/unit/integrated/v3/test_image_size.py
new file mode 100644
index 0000000000..8aeb08e9d8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_image_size.py
@@ -0,0 +1,37 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ImageSizeSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "image-size"
+ extra_extensions_to_load = ["images", "image-metadata"]
+
+ def test_show(self):
+ # Get api sample of a single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('images-details-get-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_images.py b/nova/tests/unit/integrated/v3/test_images.py
new file mode 100644
index 0000000000..95dd0c971b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_images.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'images'
+ extra_extensions_to_load = ["image-metadata"]
+
+ def test_images_list(self):
+ # Get api sample of images get list request.
+ response = self._do_get('images')
+ subs = self._get_regexes()
+ self._verify_response('images-list-get-resp', subs, response, 200)
+
+ def test_image_get(self):
+ # Get api sample of a single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_images_details(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('images-details-get-resp', subs, response, 200)
+
+ def test_image_metadata_get(self):
+ # Get api sample of an image metadata request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s/metadata' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-metadata-get-resp', subs, response, 200)
+
+ def test_image_metadata_post(self):
+ # Get api sample of an image metadata update request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_post(
+ 'images/%s/metadata' % image_id,
+ 'image-metadata-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-metadata-post-resp', subs, response, 200)
+
+ def test_image_metadata_put(self):
+ # Get api sample of image metadata put request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_put('images/%s/metadata' % image_id,
+ 'image-metadata-put-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-metadata-put-resp', subs, response, 200)
+
+ def test_image_meta_key_get(self):
+ # Get api sample of an image metadata key request.
+ image_id = fake.get_valid_image_id()
+ key = "kernel_id"
+ response = self._do_get('images/%s/metadata/%s' % (image_id, key))
+ subs = self._get_regexes()
+ self._verify_response('image-meta-key-get', subs, response, 200)
+
+ def test_image_meta_key_put(self):
+ # Get api sample of image metadata key put request.
+ image_id = fake.get_valid_image_id()
+ key = "auto_disk_config"
+ response = self._do_put('images/%s/metadata/%s' % (image_id, key),
+ 'image-meta-key-put-req', {})
+ subs = self._get_regexes()
+ self._verify_response('image-meta-key-put-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_instance_actions.py b/nova/tests/unit/integrated/v3/test_instance_actions.py
new file mode 100644
index 0000000000..3285fa4a69
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_instance_actions.py
@@ -0,0 +1,84 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from nova.compute import api as compute_api
+from nova import db
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit import utils as test_utils
+
+
+class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-instance-actions'
+
+ def setUp(self):
+ super(ServerActionsSampleJsonTest, self).setUp()
+ self.actions = fake_server_actions.FAKE_ACTIONS
+ self.events = fake_server_actions.FAKE_EVENTS
+ self.instance = test_utils.get_test_instance()
+
+ def fake_instance_action_get_by_request_id(context, uuid, request_id):
+ return copy.deepcopy(self.actions[uuid][request_id])
+
+ def fake_server_actions_get(context, uuid):
+ return [copy.deepcopy(value) for value in
+ self.actions[uuid].itervalues()]
+
+ def fake_instance_action_events_get(context, action_id):
+ return copy.deepcopy(self.events[action_id])
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return self.instance
+
+ def fake_get(self, context, instance_uuid, **kwargs):
+ return {'uuid': instance_uuid}
+
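+ # Route the db and compute API lookups used by the extension to the
+ # fake implementations defined above.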
+ self.stubs.Set(db, 'action_get_by_request_id',
+ fake_instance_action_get_by_request_id)
+ self.stubs.Set(db, 'actions_get', fake_server_actions_get)
+ self.stubs.Set(db, 'action_events_get',
+ fake_instance_action_events_get)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+
+ def test_instance_action_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_action = self.actions[fake_uuid][fake_request_id]
+
+ response = self._do_get('servers/%s/os-instance-actions/%s' %
+ (fake_uuid, fake_request_id))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['instance_uuid'] = fake_uuid
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = fake_action['request_id']
+ subs['start_time'] = fake_action['start_time']
+ subs['result'] = '(Success)|(Error)'
+ subs['event'] = '(schedule)|(compute_create)'
+ self._verify_response('instance-action-get-resp', subs, response, 200)
+
+ def test_instance_actions_list(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}')
+ self._verify_response('instance-actions-list-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_keypairs.py b/nova/tests/unit/integrated/v3/test_keypairs.py
new file mode 100644
index 0000000000..d079407985
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_keypairs.py
@@ -0,0 +1,72 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ sample_dir = "keypairs"
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = 'keypair-[0-9a-f-]+'
+ return subs
+
+ def test_keypairs_post(self, public_key=None):
+ """Get api sample of key pairs post request."""
+ key_name = 'keypair-' + str(uuid.uuid4())
+ response = self._do_post('os-keypairs', 'keypairs-post-req',
+ {'keypair_name': key_name})
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-post-resp', subs, response, 200)
+ # NOTE(maurosr): returning the key_name is necessary because the
+ # verification returns the label of the last compared piece of
+ # information in the response, not necessarily the key name.
+ return key_name
+
+ def test_keypairs_import_key_post(self):
+ # Get api sample of key pairs post to import user's key.
+ key_name = 'keypair-' + str(uuid.uuid4())
+ subs = {
+ 'keypair_name': key_name,
+ 'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
+ "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
+ "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
+ "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
+ "pSxsIbECHw== Generated-by-Nova"
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ subs)
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-import-post-resp', subs, response, 200)
+
+ def test_keypairs_list(self):
+ # Get api sample of key pairs list request.
+ key_name = self.test_keypairs_post()
+ response = self._do_get('os-keypairs')
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-list-resp', subs, response, 200)
+
+ def test_keypairs_get(self):
+ # Get api sample of key pairs get request.
+ key_name = self.test_keypairs_post()
+ response = self._do_get('os-keypairs/%s' % key_name)
+ subs = self._get_regexes()
+ subs['keypair_name'] = '(%s)' % key_name
+ self._verify_response('keypairs-get-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_lock_server.py b/nova/tests/unit/integrated/v3/test_lock_server.py
new file mode 100644
index 0000000000..0eb9676fbf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_lock_server.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class LockServerSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-lock-server"
+
+ def setUp(self):
+ """setUp Method for LockServer api samples extension
+
+ This method creates the server that will be used in each test.
+ """
+ super(LockServerSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_lock_server(self):
+ # Get api samples to lock server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'lock-server', {})
+ self.assertEqual(202, response.status_code)
+
+ def test_post_unlock_server(self):
+ # Get api samples to unlock server request.
+ self.test_post_lock_server()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'unlock-server', {})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_migrate_server.py b/nova/tests/unit/integrated/v3/test_migrate_server.py
new file mode 100644
index 0000000000..a43703fbc2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_migrate_server.py
@@ -0,0 +1,71 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.conductor import manager as conductor_manager
+from nova import db
+from nova.tests.unit.integrated.v3 import test_servers
+from nova import utils
+
+
+class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-migrate-server"
+ ctype = 'json'
+
+ def setUp(self):
+ """setUp Method for MigrateServer api samples extension
+
+ This method creates the server that will be used in each test.
+ """
+ super(MigrateServerSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
+ def test_post_migrate(self, mock_cold_migrate):
+ # Get api samples to migrate server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'migrate-server', {})
+ self.assertEqual(202, response.status_code)
+
+ def test_post_live_migrate_server(self):
+ # Get api samples to server live migrate request.
+ def fake_live_migrate(_self, context, instance, scheduler_hint,
+ block_migration, disk_over_commit):
+ self.assertEqual(self.uuid, instance["uuid"])
+ host = scheduler_hint["host"]
+ self.assertEqual(self.compute.host, host)
+
+ self.stubs.Set(conductor_manager.ComputeTaskManager,
+ '_live_migrate',
+ fake_live_migrate)
+
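+ # Return a minimal service record with an attached compute node so the
+ # destination host looks like a valid compute service during live
+ # migration.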
+ def fake_get_compute(context, host):
+ service = dict(host=host,
+ binary='nova-compute',
+ topic='compute',
+ report_count=1,
+ updated_at='foo',
+ hypervisor_type='bar',
+ hypervisor_version=utils.convert_version_to_int(
+ '1.0'),
+ disabled=False)
+ return {'compute_node': [service]}
+ self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
+
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'live-migrate-server',
+ {'hostname': self.compute.host})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_migrations.py b/nova/tests/unit/integrated/v3/test_migrations.py
new file mode 100644
index 0000000000..ab8b214f6e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_migrations.py
@@ -0,0 +1,72 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova.compute import api as compute_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class MigrationsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-migrations"
+
+ def _stub_migrations(self, context, filters):
+ fake_migrations = [
+ {
+ 'id': 1234,
+ 'source_node': 'node1',
+ 'dest_node': 'node2',
+ 'source_compute': 'compute1',
+ 'dest_compute': 'compute2',
+ 'dest_host': '1.2.3.4',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_123',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ },
+ {
+ 'id': 5678,
+ 'source_node': 'node10',
+ 'dest_node': 'node20',
+ 'source_compute': 'compute10',
+ 'dest_compute': 'compute20',
+ 'dest_host': '5.6.7.8',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_456',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 6,
+ 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ }
+ ]
+ return fake_migrations
+
+ def setUp(self):
+ super(MigrationsSamplesJsonTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_migrations',
+ self._stub_migrations)
+
+ def test_get_migrations(self):
+ response = self._do_get('os-migrations')
+ subs = self._get_regexes()
+
+ self.assertEqual(response.status_code, 200)
+ self._verify_response('migrations-get', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_multinic.py b/nova/tests/unit/integrated/v3/test_multinic.py
new file mode 100644
index 0000000000..3d55387632
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_multinic.py
@@ -0,0 +1,49 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class MultinicSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-multinic"
+
+ def _disable_instance_dns_manager(self):
+ # NOTE(markmc): it looks like multinic and instance_dns_manager are
+ # incompatible. See:
+ # https://bugs.launchpad.net/nova/+bug/1213251
+ self.flags(
+ instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')
+
+ def setUp(self):
+ self._disable_instance_dns_manager()
+ super(MultinicSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def _add_fixed_ip(self):
+ subs = {"networkId": 1}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-add-fixed-ip-req', subs)
+ self.assertEqual(response.status_code, 202)
+
+ def test_add_fixed_ip(self):
+ self._add_fixed_ip()
+
+ def test_remove_fixed_ip(self):
+ self._add_fixed_ip()
+
+ subs = {"ip": "10.0.0.4"}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-remove-fixed-ip-req', subs)
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_multiple_create.py b/nova/tests/unit/integrated/v3/test_multiple_create.py
new file mode 100644
index 0000000000..76c2083b0d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_multiple_create.py
@@ -0,0 +1,45 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class MultipleCreateJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-multiple-create"
+
+ def test_multiple_create(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'min_count': "2",
+ 'max_count': "3"
+ }
+ response = self._do_post('servers', 'multiple-create-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('multiple-create-post-resp', subs, response, 202)
+
+ def test_multiple_create_without_reservation_id(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'min_count': "2",
+ 'max_count': "3"
+ }
+ response = self._do_post('servers', 'multiple-create-no-resv-post-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('multiple-create-no-resv-post-resp', subs,
+ response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_networks.py b/nova/tests/unit/integrated/v3/test_networks.py
new file mode 100644
index 0000000000..555c682c78
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_networks.py
@@ -0,0 +1,73 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import api as network_api
+from nova.tests.unit.api.openstack.compute.contrib import test_networks
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-networks"
+
+ def setUp(self):
+ super(NetworksJsonTests, self).setUp()
+ fake_network_api = test_networks.FakeNetworkAPI()
+ self.stubs.Set(network_api.API, "get_all",
+ fake_network_api.get_all)
+ self.stubs.Set(network_api.API, "get",
+ fake_network_api.get)
+ self.stubs.Set(network_api.API, "associate",
+ fake_network_api.associate)
+ self.stubs.Set(network_api.API, "delete",
+ fake_network_api.delete)
+ self.stubs.Set(network_api.API, "create",
+ fake_network_api.create)
+ self.stubs.Set(network_api.API, "add_network_to_project",
+ fake_network_api.add_network_to_project)
+
+ def test_network_list(self):
+ response = self._do_get('os-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-resp', subs, response, 200)
+
+ def test_network_disassociate(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_post('os-networks/%s/action' % uuid,
+ 'networks-disassociate-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_network_show(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_get('os-networks/%s' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('network-show-resp', subs, response, 200)
+
+ def test_network_create(self):
+ response = self._do_post("os-networks",
+ 'network-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('network-create-resp', subs, response, 200)
+
+ def test_network_add(self):
+ response = self._do_post("os-networks/add",
+ 'network-add-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_network_delete(self):
+ response = self._do_delete('os-networks/always_delete')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_networks_associate.py b/nova/tests/unit/integrated/v3/test_networks_associate.py
new file mode 100644
index 0000000000..fe109d4d6c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_networks_associate.py
@@ -0,0 +1,76 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.network import api as network_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
+
+
+class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-networks-associate"
+ extra_extensions_to_load = ["os-networks"]
+
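+ # Unique default marker for the optional host/project arguments of the
+ # stubbed associate() call below.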
+ _sentinel = object()
+
+ def _get_flags(self):
+ f = super(NetworksAssociateJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # Networks_associate requires the Networks extension to be loaded
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
+ return f
+
+ def setUp(self):
+ super(NetworksAssociateJsonTests, self).setUp()
+
+ def fake_associate(self, context, network_id,
+ host=NetworksAssociateJsonTests._sentinel,
+ project=NetworksAssociateJsonTests._sentinel):
+ return True
+
+ self.stubs.Set(network_api.API, "associate", fake_associate)
+
+ def test_disassociate(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_disassociate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-host-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_disassociate_project(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-disassociate-project-req',
+ {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_associate_host(self):
+ response = self._do_post('os-networks/1/action',
+ 'network-associate-host-req',
+ {"host": "testHost"})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_pause_server.py b/nova/tests/unit/integrated/v3/test_pause_server.py
new file mode 100644
index 0000000000..4993dc1048
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_pause_server.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class PauseServerSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-pause-server"
+
+ def setUp(self):
+ """setUp Method for PauseServer api samples extension
+
+ This method creates the server that will be used in each test
+ """
+ super(PauseServerSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_pause(self):
+ # Get api samples to pause server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'pause-server', {})
+ self.assertEqual(202, response.status_code)
+
+ def test_post_unpause(self):
+ # Get api samples to unpause server request.
+ self.test_post_pause()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'unpause-server', {})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_pci.py b/nova/tests/unit/integrated/v3/test_pci.py
new file mode 100644
index 0000000000..bb655a0ef0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_pci.py
@@ -0,0 +1,182 @@
+# Copyright 2013 Intel.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import db
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit.integrated.v3 import test_servers
+
+
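+# Fake PCI device rows in the shape returned by the db layer; the PCI
+# sample tests below stub the db calls to return these records.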
+fake_db_dev_1 = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': '0000:04:10.0',
+ 'vendor_id': '8086',
+ 'product_id': '1520',
+ 'dev_type': 'type-VF',
+ 'status': 'available',
+ 'dev_id': 'pci_0000_04_10_0',
+ 'label': 'label_8086_1520',
+ 'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
+ 'request_id': None,
+ 'extra_info': '{"key1": "value1", "key2": "value2"}'
+ }
+
+fake_db_dev_2 = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 2,
+ 'compute_node_id': 1,
+ 'address': '0000:04:10.1',
+ 'vendor_id': '8086',
+ 'product_id': '1520',
+ 'dev_type': 'type-VF',
+ 'status': 'available',
+ 'dev_id': 'pci_0000_04_10_1',
+ 'label': 'label_8086_1520',
+ 'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
+ 'request_id': None,
+ 'extra_info': '{"key3": "value3", "key4": "value4"}'
+ }
+
+
+class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-pci"
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedHypervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extra_extensions_to_load = ['os-hypervisors']
+ extension_name = 'os-pci'
+
+ def setUp(self):
+ super(ExtendedHypervisorPciSampleJsonTest, self).setUp()
+ self.fake_compute_node = {"cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "host_ip": "1.1.1.1",
+ "state": "up",
+ "status": "enabled",
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {"host": '043b3cacf6f34c90a'
+ '7245151fc8ebcda',
+ "disabled": False,
+ "disabled_reason": None},
+ "vcpus": 1,
+ "vcpus_used": 0,
+ "service_id": 2,
+ "pci_stats": [
+ {"count": 5,
+ "vendor_id": "8086",
+ "product_id": "1520",
+ "keya": "valuea",
+ "extra_info": {
+ "phys_function": '[["0x0000", '
+ '"0x04", "0x00",'
+ ' "0x1"]]',
+ "key1": "value1"}}]}
+
+ @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+ @mock.patch("nova.db.compute_node_get")
+ def test_pci_show(self, mock_db, mock_service):
+ self.fake_compute_node['pci_stats'] = jsonutils.dumps(
+ self.fake_compute_node['pci_stats'])
+ mock_db.return_value = self.fake_compute_node
+ hypervisor_id = 1
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs = {
+ 'hypervisor_id': hypervisor_id,
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-pci-show-resp',
+ subs, response, 200)
+
+ @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+ @mock.patch("nova.db.compute_node_get_all")
+ def test_pci_detail(self, mock_db, mock_service):
+ self.fake_compute_node['pci_stats'] = jsonutils.dumps(
+ self.fake_compute_node['pci_stats'])
+
+ mock_db.return_value = [self.fake_compute_node]
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/detail')
+
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-pci-detail-resp',
+ subs, response, 200)
+
+
+class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-pci"
+
+ def _fake_pci_device_get_by_id(self, context, id):
+ return fake_db_dev_1
+
+ def _fake_pci_device_get_all_by_node(self, context, id):
+ return [fake_db_dev_1, fake_db_dev_2]
+
+ def test_pci_show(self):
+ self.stubs.Set(db, 'pci_device_get_by_id',
+ self._fake_pci_device_get_by_id)
+ response = self._do_get('os-pci/1')
+ subs = self._get_regexes()
+ self._verify_response('pci-show-resp', subs, response, 200)
+
+ def test_pci_index(self):
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+ response = self._do_get('os-pci')
+ subs = self._get_regexes()
+ self._verify_response('pci-index-resp', subs, response, 200)
+
+ def test_pci_detail(self):
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+ response = self._do_get('os-pci/detail')
+ subs = self._get_regexes()
+ self._verify_response('pci-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_quota_sets.py b/nova/tests/unit/integrated/v3/test_quota_sets.py
new file mode 100644
index 0000000000..8848fd9d2a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_quota_sets.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-quota-sets"
+
+ def test_show_quotas(self):
+ # Get api sample to show quotas.
+ response = self._do_get('os-quota-sets/fake_tenant')
+ self._verify_response('quotas-show-get-resp', {}, response, 200)
+
+ def test_show_quotas_defaults(self):
+ # Get api sample to show quotas defaults.
+ response = self._do_get('os-quota-sets/fake_tenant/defaults')
+ self._verify_response('quotas-show-defaults-get-resp',
+ {}, response, 200)
+
+ def test_update_quotas(self):
+ # Get api sample to update quotas.
+ response = self._do_put('os-quota-sets/fake_tenant',
+ 'quotas-update-post-req',
+ {})
+ self._verify_response('quotas-update-post-resp', {}, response, 200)
+
+ def test_delete_quotas(self):
+ # Get api sample to delete quota.
+ response = self._do_delete('os-quota-sets/fake_tenant')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_update_quotas_force(self):
+ # Get api sample to force-update quotas.
+ response = self._do_put('os-quota-sets/fake_tenant',
+ 'quotas-update-force-post-req',
+ {})
+ return self._verify_response('quotas-update-force-post-resp', {},
+ response, 200)
+
+ def test_show_quotas_for_user(self):
+ # Get api sample to show quotas for user.
+ response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
+ self._verify_response('user-quotas-show-get-resp', {}, response, 200)
+
+ def test_delete_quotas_for_user(self):
+ response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_update_quotas_for_user(self):
+ # Get api sample to update quotas for user.
+ response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
+ 'user-quotas-update-post-req',
+ {})
+ return self._verify_response('user-quotas-update-post-resp', {},
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_remote_consoles.py b/nova/tests/unit/integrated/v3/test_remote_consoles.py
new file mode 100644
index 0000000000..6f35aafb34
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_remote_consoles.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-remote-consoles"
+
+ def setUp(self):
+ super(ConsolesSampleJsonTests, self).setUp()
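+ # Enable every remote console type so each sample request can succeed.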
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ self.flags(enabled=True, group='rdp')
+ self.flags(enabled=True, group='serial_console')
+
+ def test_get_vnc_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-vnc-console-post-resp', subs, response, 200)
+
+ def test_get_spice_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-spice-console-post-req',
+ {'action': 'os-getSPICEConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-spice-console-post-resp', subs,
+ response, 200)
+
+ def test_get_rdp_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-rdp-console-post-req',
+ {'action': 'os-getRDPConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-rdp-console-post-resp', subs,
+ response, 200)
+
+ def test_get_serial_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-serial-console-post-req',
+ {'action': 'os-getSerialConsole'})
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ self._verify_response('get-serial-console-post-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_rescue.py b/nova/tests/unit/integrated/v3/test_rescue.py
new file mode 100644
index 0000000000..65532607d5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_rescue.py
@@ -0,0 +1,82 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class RescueJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-rescue"
+
+ def _rescue(self, uuid):
+ req_subs = {
+ 'password': 'MySecretPass'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ def _unrescue(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-unrescue-req', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_server_rescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+ def test_server_rescue_with_image_ref_specified(self):
+ uuid = self._post_server()
+
+ req_subs = {
+ 'password': 'MySecretPass',
+ 'image_ref': '2341-Abc'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req-with-image-ref', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+ def test_server_unrescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+ self._unrescue(uuid)
+
+ # Do a server get to make sure that the 'ACTIVE' state is back
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'ACTIVE'
+
+ self._verify_response('server-get-resp-unrescue', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_scheduler_hints.py b/nova/tests/unit/integrated/v3/test_scheduler_hints.py
new file mode 100644
index 0000000000..6ecea5efc7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_scheduler_hints.py
@@ -0,0 +1,32 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class SchedulerHintsJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-scheduler-hints"
+
+ def test_scheduler_hints_post(self):
+ # Get api sample of scheduler hint post request.
+ subs = self._get_regexes()
+ subs.update({'image_id': fake.get_valid_image_id(),
+ 'image_near': str(uuid.uuid4())})
+ response = self._do_post('servers', 'scheduler-hints-post-req',
+ subs)
+ self._verify_response('scheduler-hints-post-resp', subs, response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_security_group_default_rules.py b/nova/tests/unit/integrated/v3/test_security_group_default_rules.py
new file mode 100644
index 0000000000..e0c2ec8132
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_security_group_default_rules.py
@@ -0,0 +1,40 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class SecurityGroupDefaultRulesSampleJsonTest(
+ api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-security-group-default-rules'
+
+ def test_security_group_default_rules_create(self):
+ response = self._do_post('os-security-group-default-rules',
+ 'security-group-default-rules-create-req',
+ {})
+ self._verify_response('security-group-default-rules-create-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_list(self):
+ self.test_security_group_default_rules_create()
+ response = self._do_get('os-security-group-default-rules')
+ self._verify_response('security-group-default-rules-list-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_show(self):
+ self.test_security_group_default_rules_create()
+ rule_id = '1'
+ response = self._do_get('os-security-group-default-rules/%s' % rule_id)
+ self._verify_response('security-group-default-rules-show-resp',
+ {}, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_security_groups.py b/nova/tests/unit/integrated/v3/test_security_groups.py
new file mode 100644
index 0000000000..3afb26a06f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_security_groups.py
@@ -0,0 +1,166 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network.security_group import neutron_driver
+from nova.tests.unit.integrated.v3 import test_servers
+
+
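+# Minimal fakes for the neutron security group driver methods that the test
+# class stubs in during setUp.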
+def fake_get(*args, **kwargs):
+ nova_group = {}
+ nova_group['id'] = 1
+ nova_group['description'] = 'default'
+ nova_group['name'] = 'default'
+ nova_group['project_id'] = 'openstack'
+ nova_group['rules'] = []
+ return nova_group
+
+
+def fake_get_instances_security_groups_bindings(self, context, servers,
+ detailed=False):
+ result = {}
+ for s in servers:
+ result[s.get('id')] = [{'name': 'test'}]
+ return result
+
+
+def fake_add_to_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_remove_from_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_list(self, context, names=None, ids=None, project=None,
+ search_opts=None):
+ return [fake_get()]
+
+
+def fake_get_instance_security_groups(self, context, instance_uuid,
+ detailed=False):
+ return [fake_get()]
+
+
+def fake_create_security_group(self, context, name, description):
+ return fake_get()
+
+
+class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-security-groups'
+
+ def setUp(self):
+ self.flags(security_group_api=('neutron'))
+ super(SecurityGroupsJsonTest, self).setUp()
+ self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instances_security_groups_bindings',
+ fake_get_instances_security_groups_bindings)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'add_to_instance',
+ fake_add_to_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'remove_from_instance',
+ fake_remove_from_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'list',
+ fake_list)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instance_security_groups',
+ fake_get_instance_security_groups)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'create_security_group',
+ fake_create_security_group)
+
+ def test_server_create(self):
+ self._post_server()
+
+ def test_server_get(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_server_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+ def _get_create_subs(self):
+ return {
+ 'group_name': 'default',
+ "description": "default",
+ }
+
+ def _create_security_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-security-groups',
+ 'security-group-post-req', subs)
+
+ def _add_group(self, uuid):
+ subs = {
+ 'group_name': 'test'
+ }
+ return self._do_post('servers/%s/action' % uuid,
+ 'security-group-add-post-req', subs)
+
+ def test_security_group_create(self):
+ response = self._create_security_group()
+ subs = self._get_create_subs()
+ self._verify_response('security-groups-create-resp', subs,
+ response, 200)
+
+ def test_security_groups_list(self):
+ # Get api sample of security groups get list request.
+ response = self._do_get('os-security-groups')
+ subs = self._get_regexes()
+ self._verify_response('security-groups-list-get-resp',
+ subs, response, 200)
+
+ def test_security_groups_get(self):
+ # Get api sample of security groups get request.
+ security_group_id = '11111111-1111-1111-1111-111111111111'
+ response = self._do_get('os-security-groups/%s' % security_group_id)
+ subs = self._get_regexes()
+ self._verify_response('security-groups-get-resp', subs, response, 200)
+
+ def test_security_groups_list_server(self):
+ # Get api sample of security groups for a specific server.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-security-groups' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-security-groups-list-resp',
+ subs, response, 200)
+
+ def test_security_groups_add(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ response = self._add_group(uuid)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_security_groups_remove(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ self._add_group(uuid)
+ subs = {
+ 'group_name': 'test'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'security-group-remove-post-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_server_diagnostics.py b/nova/tests/unit/integrated/v3/test_server_diagnostics.py
new file mode 100644
index 0000000000..b2c41225e3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_diagnostics.py
@@ -0,0 +1,27 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerDiagnosticsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-diagnostics"
+
+ def test_server_diagnostics_get(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/diagnostics' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-diagnostics-get-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_server_external_events.py b/nova/tests/unit/integrated/v3/test_server_external_events.py
new file mode 100644
index 0000000000..9d2675a11c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_external_events.py
@@ -0,0 +1,40 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerExternalEventsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-external-events"
+
+ def setUp(self):
+ """setUp Method for AdminActions api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(ServerExternalEventsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_create_event(self):
+ subs = {
+ 'uuid': self.uuid,
+ 'name': 'network-changed',
+ 'status': 'completed',
+ 'tag': 'foo',
+ }
+ response = self._do_post('os-server-external-events',
+ 'event-create-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('event-create-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_server_groups.py b/nova/tests/unit/integrated/v3/test_server_groups.py
new file mode 100644
index 0000000000..f5cc253cc9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_groups.py
@@ -0,0 +1,66 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerGroupsSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-groups"
+
+ def _get_create_subs(self):
+ return {'name': 'test'}
+
+ def _post_server_group(self):
+ """Verify the response status and returns the UUID of the
+ newly created server group.
+ """
+ subs = self._get_create_subs()
+ response = self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+ subs = self._get_regexes()
+ subs['name'] = 'test'
+ return self._verify_response('server-groups-post-resp',
+ subs, response, 200)
+
+ def _create_server_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+
+ def test_server_groups_post(self):
+ return self._post_server_group()
+
+ def test_server_groups_list(self):
+ subs = self._get_create_subs()
+ uuid = self._post_server_group()
+ response = self._do_get('os-server-groups')
+ subs.update(self._get_regexes())
+ subs['id'] = uuid
+ self._verify_response('server-groups-list-resp',
+ subs, response, 200)
+
+ def test_server_groups_get(self):
+ # Get api sample of server groups get request.
+ subs = {'name': 'test'}
+ uuid = self._post_server_group()
+ subs['id'] = uuid
+ response = self._do_get('os-server-groups/%s' % uuid)
+
+ self._verify_response('server-groups-get-resp', subs, response, 200)
+
+ def test_server_groups_delete(self):
+ uuid = self._post_server_group()
+ response = self._do_delete('os-server-groups/%s' % uuid)
+ self.assertEqual(response.status_code, 204)
diff --git a/nova/tests/unit/integrated/v3/test_server_metadata.py b/nova/tests/unit/integrated/v3/test_server_metadata.py
new file mode 100644
index 0000000000..9b45af3d07
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_metadata.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServersMetadataJsonTest(test_servers.ServersSampleBase):
+ extends_name = 'core_only'
+ sample_dir = 'server-metadata'
+
+ def _create_and_set(self, subs):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+ return uuid
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['value'] = '(Foo|Bar) Value'
+ return subs
+
+ def test_metadata_put_all(self):
+ # Test setting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ self._create_and_set(subs)
+
+ def test_metadata_post_all(self):
+ # Test updating all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_post('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_get_all(self):
+ # Test getting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata' % uuid)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_put(self):
+ # Test putting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_put('servers/%s/metadata/foo' % uuid,
+ 'server-metadata-req',
+ subs)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_get(self):
+ # Test getting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata/foo' % uuid)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_delete(self):
+ # Test deleting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_delete('servers/%s/metadata/foo' % uuid)
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_server_usage.py b/nova/tests/unit/integrated/v3/test_server_usage.py
new file mode 100644
index 0000000000..1b6358bd43
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_usage.py
@@ -0,0 +1,39 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerUsageSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-server-usage'
+
+ def setUp(self):
+ """setUp method for server usage."""
+ super(ServerUsageSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_show(self):
+ response = self._do_get('servers/%s' % self.uuid)
+ subs = self._get_regexes()
+ subs['id'] = self.uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_details(self):
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = self.uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_servers.py b/nova/tests/unit/integrated/v3/test_servers.py
new file mode 100644
index 0000000000..dfa8f5a9d9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_servers.py
@@ -0,0 +1,188 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ServersSampleBase(api_sample_base.ApiSampleTestBaseV3):
+ def _post_server(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'glance_host': self._get_glance_host()
+ }
+ response = self._do_post('servers', 'server-post-req', subs)
+ subs = self._get_regexes()
+ return self._verify_response('server-post-resp', subs, response, 202)
+
+
+class ServersSampleJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def test_servers_post(self):
+ return self._post_server()
+
+ def test_servers_get(self):
+ uuid = self.test_servers_post()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_servers_list(self):
+ uuid = self._post_server()
+ response = self._do_get('servers')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ self._verify_response('servers-list-resp', subs, response, 200)
+
+ def test_servers_details(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('servers-details-resp', subs, response, 200)
+
+
+class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
+ all_extensions = True
+
+
+class ServersActionsJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def _test_server_action(self, uuid, action, req_tpl,
+ subs=None, resp_tpl=None, code=202):
+ subs = subs or {}
+ subs.update({'action': action,
+ 'glance_host': self._get_glance_host()})
+ response = self._do_post('servers/%s/action' % uuid,
+ req_tpl,
+ subs)
+ if resp_tpl:
+ subs.update(self._get_regexes())
+ self._verify_response(resp_tpl, subs, response, code)
+ else:
+ self.assertEqual(response.status_code, code)
+ self.assertEqual(response.content, "")
+
+ def test_server_reboot_hard(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ 'server-action-reboot',
+ {"type": "HARD"})
+
+ def test_server_reboot_soft(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ 'server-action-reboot',
+ {"type": "SOFT"})
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ image = fake.get_valid_image_id()
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ }
+ self._test_server_action(uuid, 'rebuild',
+ 'server-action-rebuild',
+ subs,
+ 'server-action-rebuild-resp')
+
+ def _test_server_rebuild_preserve_ephemeral(self, value):
+ uuid = self._post_server()
+ image = fake.get_valid_image_id()
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': str(value).lower(),
+ 'action': 'rebuild',
+ 'glance_host': self._get_glance_host(),
+ }
+
+ def fake_rebuild(self_, context, instance, image_href, admin_password,
+ files_to_inject=None, **kwargs):
+ self.assertEqual(kwargs['preserve_ephemeral'], value)
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild-preserve-ephemeral',
+ subs)
+ self.assertEqual(response.status_code, 202)
+
+ def test_server_rebuild_preserve_ephemeral_true(self):
+ self._test_server_rebuild_preserve_ephemeral(True)
+
+ def test_server_rebuild_preserve_ephemeral_false(self):
+ self._test_server_rebuild_preserve_ephemeral(False)
+
+ def test_server_resize(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ self._test_server_action(uuid, "resize",
+ 'server-action-resize',
+ {"id": 2,
+ "host": self._get_host()})
+ return uuid
+
+ def test_server_revert_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "revertResize",
+ 'server-action-revert-resize')
+
+ def test_server_confirm_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "confirmResize",
+ 'server-action-confirm-resize',
+ code=204)
+
+ def test_server_create_image(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'createImage',
+ 'server-action-create-image',
+ {'name': 'foo-image'})
+
+
+class ServerStartStopJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def _test_server_action(self, uuid, action, req_tpl):
+ response = self._do_post('servers/%s/action' % uuid,
+ req_tpl,
+ {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_server_start(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop', 'server-action-stop')
+ self._test_server_action(uuid, 'os-start', 'server-action-start')
+
+ def test_server_stop(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop', 'server-action-stop')
diff --git a/nova/tests/unit/integrated/v3/test_servers_ips.py b/nova/tests/unit/integrated/v3/test_servers_ips.py
new file mode 100644
index 0000000000..7c0b24b66b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_servers_ips.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServersIpsJsonTest(test_servers.ServersSampleBase):
+ extends_name = 'core_only'
+ sample_dir = 'server-ips'
+
+ def test_get(self):
+ # Test getting a server's IP information.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-resp', subs, response, 200)
+
+ def test_get_by_network(self):
+ # Test getting a server's IP information by network id.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips/private' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-network-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_services.py b/nova/tests/unit/integrated/v3/test_services.py
new file mode 100644
index 0000000000..9ce9ffdbe7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_services.py
@@ -0,0 +1,87 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import db
+from nova.tests.unit.api.openstack.compute.plugins.v3 import test_services
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-services"
+
+ def setUp(self):
+ super(ServicesJsonTest, self).setUp()
+ self.stubs.Set(db, "service_get_all",
+ test_services.fake_db_api_service_get_all)
+ self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts",
+ test_services.fake_utcnow_ts)
+ self.stubs.Set(db, "service_get_by_args",
+ test_services.fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update",
+ test_services.fake_service_update)
+
+ def tearDown(self):
+ super(ServicesJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_services_list(self):
+ """Return a list of all services."""
+ response = self._do_get('os-services')
+ subs = {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ self._verify_response('services-list-get-resp', subs, response, 200)
+
+ def test_service_enable(self):
+ """Enable an existing agent build."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/enable',
+ 'service-enable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-enable-put-resp', subs, response, 200)
+
+ def test_service_disable(self):
+ """Disable an existing agent build."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/disable',
+ 'service-disable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-disable-put-resp', subs, response, 200)
+
+ def test_service_disable_log_reason(self):
+ """Disable an existing service and log the reason."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test2'}
+ response = self._do_put('os-services/disable-log-reason',
+ 'service-disable-log-put-req', subs)
+ return self._verify_response('service-disable-log-put-resp',
+ subs, response, 200)
+
+ def test_service_delete(self):
+ """Delete an existing service."""
+ response = self._do_delete('os-services/1')
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_shelve.py b/nova/tests/unit/integrated/v3/test_shelve.py
new file mode 100644
index 0000000000..4a2224e783
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_shelve.py
@@ -0,0 +1,50 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+
+
+class ShelveJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-shelve"
+
+ def setUp(self):
+ super(ShelveJsonTest, self).setUp()
+ # Don't offload the instance, so we can test the offload call.
+ CONF.set_override('shelved_offload_time', -1)
+
+ def _test_server_action(self, uuid, template, action):
+ response = self._do_post('servers/%s/action' % uuid,
+ template, {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_shelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+
+ def test_shelve_offload(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
+
+ def test_unshelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-unshelve', 'unshelve')
diff --git a/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py b/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py
new file mode 100644
index 0000000000..4508a36f8b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py
@@ -0,0 +1,61 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import urllib
+
+from oslo.utils import timeutils
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class SimpleTenantUsageSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-simple-tenant-usage"
+
+ def setUp(self):
+ """setUp method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).setUp()
+
+ started = timeutils.utcnow()
+ now = started + datetime.timedelta(hours=1)
+
+ timeutils.set_time_override(started)
+ self._post_server()
+ timeutils.set_time_override(now)
+
+ self.query = {
+ 'start': str(started),
+ 'end': str(now)
+ }
+
+ def tearDown(self):
+ """tearDown method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_get_tenants_usage(self):
+ # Get the API sample for an all-tenants usage request.
+ response = self._do_get('os-simple-tenant-usage?%s' % (
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get', subs, response, 200)
+
+ def test_get_tenant_usage_details(self):
+ # Get the API sample for a specific tenant's usage request.
+ tenant_id = 'openstack'
+ response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get-specific', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_suspend_server.py b/nova/tests/unit/integrated/v3/test_suspend_server.py
new file mode 100644
index 0000000000..11053b3e3e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_suspend_server.py
@@ -0,0 +1,41 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class SuspendServerSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-suspend-server"
+ ctype = 'json'
+
+ def setUp(self):
+ """setUp Method for SuspendServer api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(SuspendServerSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_suspend(self):
+ # Get the API sample for a suspend server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-suspend', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_resume(self):
+ # Get the API sample for a server resume request.
+ self.test_post_suspend()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-resume', {})
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_tenant_networks.py b/nova/tests/unit/integrated/v3/test_tenant_networks.py
new file mode 100644
index 0000000000..72a888ba93
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_tenant_networks.py
@@ -0,0 +1,61 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+import nova.quota
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('enable_network_quota',
+ 'nova.api.openstack.compute.contrib.os_tenant_networks')
+
+
+class TenantNetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-tenant-networks"
+
+ def setUp(self):
+ super(TenantNetworksJsonTests, self).setUp()
+ CONF.set_override("enable_network_quota", True)
+
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
+
+ def test_list_networks(self):
+ response = self._do_get('os-tenant-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-res', subs, response, 200)
+
+ def test_create_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ subs = self._get_regexes()
+ self._verify_response('networks-post-res', subs, response, 200)
+
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ net = jsonutils.loads(response.content)
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_used_limits.py b/nova/tests/unit/integrated/v3/test_used_limits.py
new file mode 100644
index 0000000000..6682246c9e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_used_limits.py
@@ -0,0 +1,34 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class UsedLimitsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-used-limits"
+ extra_extensions_to_load = ["limits"]
+
+ def test_get_used_limits(self):
+ # Get the API sample for used limits.
+ response = self._do_get('limits')
+ subs = self._get_regexes()
+ self._verify_response('usedlimits-get-resp', subs, response, 200)
+
+ def test_get_used_limits_for_admin(self):
+ tenant_id = 'openstack'
+ response = self._do_get('limits?tenant_id=%s' % tenant_id)
+ subs = self._get_regexes()
+ self._verify_response('usedlimits-get-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_user_data.py b/nova/tests/unit/integrated/v3/test_user_data.py
new file mode 100644
index 0000000000..6410fa5a24
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_user_data.py
@@ -0,0 +1,36 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-user-data"
+
+ def test_user_data_post(self):
+ user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
+ user_data = base64.b64encode(user_data_contents)
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'user_data': user_data
+ }
+ response = self._do_post('servers', 'userdata-post-req', subs)
+
+ subs.update(self._get_regexes())
+ self._verify_response('userdata-post-resp', subs, response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_volumes.py b/nova/tests/unit/integrated/v3/test_volumes.py
new file mode 100644
index 0000000000..3c7ff6e460
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_volumes.py
@@ -0,0 +1,184 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.volume import cinder
+
+
+class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-volumes"
+
+ create_subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
+ }
+
+ def setUp(self):
+ super(SnapshotsSampleJsonTests, self).setUp()
+ self.stubs.Set(cinder.API, "get_all_snapshots",
+ fakes.stub_snapshot_get_all)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
+
+ def _create_snapshot(self):
+ self.stubs.Set(cinder.API, "create_snapshot",
+ fakes.stub_snapshot_create)
+
+ response = self._do_post("os-snapshots",
+ "snapshot-create-req",
+ self.create_subs)
+ return response
+
+ def test_snapshots_create(self):
+ response = self._create_snapshot()
+ self.create_subs.update(self._get_regexes())
+ self._verify_response("snapshot-create-resp",
+ self.create_subs, response, 200)
+
+ def test_snapshots_delete(self):
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ fakes.stub_snapshot_delete)
+ self._create_snapshot()
+ response = self._do_delete('os-snapshots/100')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_snapshots_detail(self):
+ response = self._do_get('os-snapshots/detail')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-detail-resp', subs, response, 200)
+
+ def test_snapshots_list(self):
+ response = self._do_get('os-snapshots')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-list-resp', subs, response, 200)
+
+ def test_snapshots_show(self):
+ response = self._do_get('os-snapshots/100')
+ subs = {
+ 'snapshot_name': 'Default name',
+ 'description': 'Default description'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('snapshots-show-resp', subs, response, 200)
+
+
+class VolumesSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-volumes"
+
+ def _get_volume_id(self):
+ return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+
+ def _stub_volume(self, id, displayname="Volume Name",
+ displaydesc="Volume Description", size=100):
+ volume = {
+ 'id': id,
+ 'size': size,
+ 'availability_zone': 'zone1:host1',
+ 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
+ 'mountpoint': '/',
+ 'status': 'in-use',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': displayname,
+ 'display_description': displaydesc,
+ 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'Backup'}
+ }
+ return volume
+
+ def _stub_volume_get(self, context, volume_id):
+ return self._stub_volume(volume_id)
+
+ def _stub_volume_delete(self, context, *args, **param):
+ pass
+
+ def _stub_volume_get_all(self, context, search_opts=None):
+ id = self._get_volume_id()
+ return [self._stub_volume(id)]
+
+ def _stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ id = self._get_volume_id()
+ return self._stub_volume(id)
+
+ def setUp(self):
+ super(VolumesSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", self._stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
+
+ def _post_volume(self):
+ subs_req = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+
+ self.stubs.Set(cinder.API, "create", self._stub_volume_create)
+ response = self._do_post('os-volumes', 'os-volumes-post-req',
+ subs_req)
+ subs = self._get_regexes()
+ subs.update(subs_req)
+ self._verify_response('os-volumes-post-resp', subs, response, 200)
+
+ def test_volumes_show(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ vol_id = self._get_volume_id()
+ response = self._do_get('os-volumes/%s' % vol_id)
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-get-resp', subs, response, 200)
+
+ def test_volumes_index(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-index-resp', subs, response, 200)
+
+ def test_volumes_detail(self):
+ # For now, index and detail are the same.
+ # See the volumes API.
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes/detail')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-detail-resp', subs, response, 200)
+
+ def test_volumes_create(self):
+ self._post_volume()
+
+ def test_volumes_delete(self):
+ self._post_volume()
+ vol_id = self._get_volume_id()
+ response = self._do_delete('os-volumes/%s' % vol_id)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/keymgr/__init__.py b/nova/tests/unit/keymgr/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/keymgr/__init__.py
diff --git a/nova/tests/unit/keymgr/fake.py b/nova/tests/unit/keymgr/fake.py
new file mode 100644
index 0000000000..25fb300c51
--- /dev/null
+++ b/nova/tests/unit/keymgr/fake.py
@@ -0,0 +1,24 @@
+# Copyright 2011 Justin Santa Barbara
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of a fake key manager."""
+
+
+from nova.keymgr import mock_key_mgr
+
+
+def fake_api():
+ return mock_key_mgr.MockKeyManager()
diff --git a/nova/tests/unit/keymgr/test_conf_key_mgr.py b/nova/tests/unit/keymgr/test_conf_key_mgr.py
new file mode 100644
index 0000000000..38bed78acf
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_conf_key_mgr.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the conf key manager.
+"""
+
+import array
+
+from oslo.config import cfg
+
+from nova.keymgr import conf_key_mgr
+from nova.keymgr import key
+from nova.tests.unit.keymgr import test_single_key_mgr
+
+
+CONF = cfg.CONF
+CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
+
+
+class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase):
+ def __init__(self, *args, **kwargs):
+ super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs)
+
+ self._hex_key = '0' * 64
+
+ def _create_key_manager(self):
+ CONF.set_default('fixed_key', default=self._hex_key, group='keymgr')
+ return conf_key_mgr.ConfKeyManager()
+
+ def setUp(self):
+ super(ConfKeyManagerTestCase, self).setUp()
+
+ encoded_key = array.array('B', self._hex_key.decode('hex')).tolist()
+ self.key = key.SymmetricKey('AES', encoded_key)
+
+ def test_init(self):
+ key_manager = self._create_key_manager()
+ self.assertEqual(self._hex_key, key_manager._hex_key)
+
+ def test_init_value_error(self):
+ CONF.set_default('fixed_key', default=None, group='keymgr')
+ self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager)
+
+ def test_generate_hex_key(self):
+ key_manager = self._create_key_manager()
+ self.assertEqual(self._hex_key, key_manager._generate_hex_key())
diff --git a/nova/tests/unit/keymgr/test_key.py b/nova/tests/unit/keymgr/test_key.py
new file mode 100644
index 0000000000..14766fd201
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_key.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the key classes.
+"""
+
+import array
+
+from nova.keymgr import key
+from nova import test
+
+
+class KeyTestCase(test.NoDBTestCase):
+
+ def _create_key(self):
+ raise NotImplementedError()
+
+ def setUp(self):
+ super(KeyTestCase, self).setUp()
+
+ self.key = self._create_key()
+
+
+class SymmetricKeyTestCase(KeyTestCase):
+
+ def _create_key(self):
+ return key.SymmetricKey(self.algorithm, self.encoded)
+
+ def setUp(self):
+ self.algorithm = 'AES'
+ self.encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
+
+ super(SymmetricKeyTestCase, self).setUp()
+
+ def test_get_algorithm(self):
+ self.assertEqual(self.key.get_algorithm(), self.algorithm)
+
+ def test_get_format(self):
+ self.assertEqual(self.key.get_format(), 'RAW')
+
+ def test_get_encoded(self):
+ self.assertEqual(self.key.get_encoded(), self.encoded)
+
+ def test___eq__(self):
+ self.assertTrue(self.key == self.key)
+
+ self.assertFalse(self.key is None)
+ self.assertFalse(None == self.key)
+
+ def test___ne__(self):
+ self.assertFalse(self.key != self.key)
+
+ self.assertTrue(self.key is not None)
+ self.assertTrue(None != self.key)
diff --git a/nova/tests/unit/keymgr/test_key_mgr.py b/nova/tests/unit/keymgr/test_key_mgr.py
new file mode 100644
index 0000000000..cffcfc7a0c
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_key_mgr.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the key manager.
+"""
+
+from nova import test
+
+
+class KeyManagerTestCase(test.NoDBTestCase):
+
+ def _create_key_manager(self):
+ raise NotImplementedError()
+
+ def setUp(self):
+ super(KeyManagerTestCase, self).setUp()
+
+ self.key_mgr = self._create_key_manager()
diff --git a/nova/tests/unit/keymgr/test_mock_key_mgr.py b/nova/tests/unit/keymgr/test_mock_key_mgr.py
new file mode 100644
index 0000000000..9d0c2174d1
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_mock_key_mgr.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the mock key manager.
+"""
+
+import array
+
+from nova import context
+from nova import exception
+from nova.keymgr import key as keymgr_key
+from nova.keymgr import mock_key_mgr
+from nova.tests.unit.keymgr import test_key_mgr
+
+
+class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return mock_key_mgr.MockKeyManager()
+
+ def setUp(self):
+ super(MockKeyManagerTestCase, self).setUp()
+
+ self.ctxt = context.RequestContext('fake', 'fake')
+
+ def test_create_key(self):
+ key_id_1 = self.key_mgr.create_key(self.ctxt)
+ key_id_2 = self.key_mgr.create_key(self.ctxt)
+ # ensure that the UUIDs are unique
+ self.assertNotEqual(key_id_1, key_id_2)
+
+ def test_create_key_with_length(self):
+ for length in [64, 128, 256]:
+ key_id = self.key_mgr.create_key(self.ctxt, key_length=length)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+ self.assertEqual(length / 8, len(key.get_encoded()))
+
+ def test_create_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.create_key, None)
+
+ def test_store_key(self):
+ secret_key = array.array('B', ('0' * 64).decode('hex')).tolist()
+ _key = keymgr_key.SymmetricKey('AES', secret_key)
+ key_id = self.key_mgr.store_key(self.ctxt, _key)
+
+ actual_key = self.key_mgr.get_key(self.ctxt, key_id)
+ self.assertEqual(_key, actual_key)
+
+ def test_store_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.store_key, None, None)
+
+ def test_copy_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+
+ copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
+ copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
+
+ self.assertNotEqual(key_id, copied_key_id)
+ self.assertEqual(key, copied_key)
+
+ def test_copy_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.copy_key, None, None)
+
+ def test_get_key(self):
+ pass
+
+ def test_get_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.get_key, None, None)
+
+ def test_get_unknown_key(self):
+ self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None)
+
+ def test_delete_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ self.key_mgr.delete_key(self.ctxt, key_id)
+
+ self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id)
+
+ def test_delete_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.delete_key, None, None)
+
+ def test_delete_unknown_key(self):
+ self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py b/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py
new file mode 100644
index 0000000000..8e6d0c8a27
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the not implemented key manager.
+"""
+
+from nova.keymgr import not_implemented_key_mgr
+from nova.tests.unit.keymgr import test_key_mgr
+
+
+class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return not_implemented_key_mgr.NotImplementedKeyManager()
+
+ def test_create_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.create_key, None)
+
+ def test_store_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.store_key, None, None)
+
+ def test_copy_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.copy_key, None, None)
+
+ def test_get_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.get_key, None, None)
+
+ def test_delete_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.delete_key, None, None)
diff --git a/nova/tests/unit/keymgr/test_single_key_mgr.py b/nova/tests/unit/keymgr/test_single_key_mgr.py
new file mode 100644
index 0000000000..3cf1de8da2
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_single_key_mgr.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the single key manager.
+"""
+
+import array
+
+from nova import exception
+from nova.keymgr import key
+from nova.keymgr import single_key_mgr
+from nova.tests.unit.keymgr import test_mock_key_mgr
+
+
+class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return single_key_mgr.SingleKeyManager()
+
+ def setUp(self):
+ super(SingleKeyManagerTestCase, self).setUp()
+
+ self.key_id = '00000000-0000-0000-0000-000000000000'
+ encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
+ self.key = key.SymmetricKey('AES', encoded)
+
+ def test___init__(self):
+ self.assertEqual(self.key,
+ self.key_mgr.get_key(self.ctxt, self.key_id))
+
+ def test_create_key(self):
+ key_id_1 = self.key_mgr.create_key(self.ctxt)
+ key_id_2 = self.key_mgr.create_key(self.ctxt)
+ # ensure that the UUIDs are the same
+ self.assertEqual(key_id_1, key_id_2)
+
+ def test_create_key_with_length(self):
+ pass
+
+ def test_store_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.store_key, None, self.key)
+
+ def test_copy_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+
+ copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
+ copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
+
+ self.assertEqual(key_id, copied_key_id)
+ self.assertEqual(key, copied_key)
+
+ def test_delete_key(self):
+ pass
+
+ def test_delete_unknown_key(self):
+ self.assertRaises(exception.KeyManagerError,
+ self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/unit/matchers.py b/nova/tests/unit/matchers.py
new file mode 100644
index 0000000000..b392e3e852
--- /dev/null
+++ b/nova/tests/unit/matchers.py
@@ -0,0 +1,466 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Matcher classes to be used inside of the testtools assertThat framework."""
+
+import pprint
+
+from lxml import etree
+from testtools import content
+
+
+class DictKeysMismatch(object):
+ def __init__(self, d1only, d2only):
+ self.d1only = d1only
+ self.d2only = d2only
+
+ def describe(self):
+ return ('Keys in d1 and not d2: %(d1only)s.'
+ ' Keys in d2 and not d1: %(d2only)s' %
+ {'d1only': self.d1only, 'd2only': self.d2only})
+
+ def get_details(self):
+ return {}
+
+
+class DictMismatch(object):
+ def __init__(self, key, d1_value, d2_value):
+ self.key = key
+ self.d1_value = d1_value
+ self.d2_value = d2_value
+
+ def describe(self):
+ return ("Dictionaries do not match at %(key)s."
+ " d1: %(d1_value)s d2: %(d2_value)s" %
+ {'key': self.key, 'd1_value': self.d1_value,
+ 'd2_value': self.d2_value})
+
+ def get_details(self):
+ return {}
+
+
+class DictMatches(object):
+
+ def __init__(self, d1, approx_equal=False, tolerance=0.001):
+ self.d1 = d1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictMatches(%s)' % (pprint.pformat(self.d1))
+
+ # Useful assertions
+ def match(self, d2):
+ """Assert two dicts are equivalent.
+
+ This is a 'deep' match in the sense that it handles nested
+ dictionaries appropriately.
+
+ NOTE:
+
+ If you don't care about (or don't know) a given value, you can
+ specify the string DONTCARE as the value. This causes that
+ dict item to be skipped.
+
+ """
+
+ d1keys = set(self.d1.keys())
+ d2keys = set(d2.keys())
+ if d1keys != d2keys:
+ d1only = d1keys - d2keys
+ d2only = d2keys - d1keys
+ return DictKeysMismatch(d1only, d2only)
+
+ for key in d1keys:
+ d1value = self.d1[key]
+ d2value = d2[key]
+ try:
+ error = abs(float(d1value) - float(d2value))
+ within_tolerance = error <= self.tolerance
+ except (ValueError, TypeError):
+ # If either value is not convertible to float, skip the
+ # tolerance check; float() raises ValueError for strings and
+ # TypeError for other types (such as None).
+ within_tolerance = False
+
+ if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
+ matcher = DictMatches(d1value)
+ did_match = matcher.match(d2value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (d1value, d2value):
+ continue
+ elif self.approx_equal and within_tolerance:
+ continue
+ elif d1value != d2value:
+ return DictMismatch(key, d1value, d2value)
+
+
+class ListLengthMismatch(object):
+ def __init__(self, len1, len2):
+ self.len1 = len1
+ self.len2 = len2
+
+ def describe(self):
+ return ('Length mismatch: len(L1)=%(len1)d != '
+ 'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
+
+ def get_details(self):
+ return {}
+
+
+class DictListMatches(object):
+
+ def __init__(self, l1, approx_equal=False, tolerance=0.001):
+ self.l1 = l1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
+
+ # Useful assertions
+ def match(self, l2):
+ """Assert a list of dicts are equivalent."""
+
+ l1count = len(self.l1)
+ l2count = len(l2)
+ if l1count != l2count:
+ return ListLengthMismatch(l1count, l2count)
+
+ for d1, d2 in zip(self.l1, l2):
+ matcher = DictMatches(d2,
+ approx_equal=self.approx_equal,
+ tolerance=self.tolerance)
+ did_match = matcher.match(d1)
+ if did_match:
+ return did_match
+
+
+class SubDictMismatch(object):
+ def __init__(self,
+ key=None,
+ sub_value=None,
+ super_value=None,
+ keys=False):
+ self.key = key
+ self.sub_value = sub_value
+ self.super_value = super_value
+ self.keys = keys
+
+ def describe(self):
+ if self.keys:
+ return "Keys between dictionaries did not match"
+ else:
+ return("Dictionaries do not match at %s. d1: %s d2: %s"
+ % (self.key,
+ self.super_value,
+ self.sub_value))
+
+ def get_details(self):
+ return {}
+
+
+class IsSubDictOf(object):
+
+ def __init__(self, super_dict):
+ self.super_dict = super_dict
+
+ def __str__(self):
+ return 'IsSubDictOf(%s)' % (self.super_dict)
+
+ def match(self, sub_dict):
+ """Assert a sub_dict is subset of super_dict."""
+ if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
+ return SubDictMismatch(keys=True)
+ for k, sub_value in sub_dict.items():
+ super_value = self.super_dict[k]
+ if isinstance(sub_value, dict):
+ matcher = IsSubDictOf(super_value)
+ did_match = matcher.match(sub_value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (sub_value, super_value):
+ continue
+ else:
+ if sub_value != super_value:
+ return SubDictMismatch(k, sub_value, super_value)
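+ # Illustrative usage (not part of the original change):
+ #     self.assertThat({'id': 1},
+ #                     matchers.IsSubDictOf({'id': 1, 'host': 'host1'}))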
+
+
+class FunctionCallMatcher(object):
+
+ def __init__(self, expected_func_calls):
+ self.expected_func_calls = expected_func_calls
+ self.actual_func_calls = []
+
+ def call(self, *args, **kwargs):
+ func_call = {'args': args, 'kwargs': kwargs}
+ self.actual_func_calls.append(func_call)
+
+ def match(self):
+ dict_list_matcher = DictListMatches(self.expected_func_calls)
+ return dict_list_matcher.match(self.actual_func_calls)
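+ # Illustrative usage (not part of the original change): stub out a method
+ # with matcher.call so its invocations are recorded, then verify them.
+ # SomeClass and its method are hypothetical names:
+ #     matcher = matchers.FunctionCallMatcher([{'args': (1,), 'kwargs': {}}])
+ #     self.stubs.Set(SomeClass, 'method', matcher.call)
+ #     ... exercise the code under test ...
+ #     self.assertIsNone(matcher.match())  # None means every call matched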
+
+
+class XMLMismatch(object):
+ """Superclass for XML mismatch."""
+
+ def __init__(self, state):
+ self.path = str(state)
+ self.expected = state.expected
+ self.actual = state.actual
+
+ def describe(self):
+ return "%(path)s: XML does not match" % self.path
+
+ def get_details(self):
+ return {
+ 'expected': content.text_content(self.expected),
+ 'actual': content.text_content(self.actual),
+ }
+
+
+class XMLTagMismatch(XMLMismatch):
+ """XML tags don't match."""
+
+ def __init__(self, state, idx, expected_tag, actual_tag):
+ super(XMLTagMismatch, self).__init__(state)
+ self.idx = idx
+ self.expected_tag = expected_tag
+ self.actual_tag = actual_tag
+
+ def describe(self):
+ return ("%(path)s: XML tag mismatch at index %(idx)d: "
+ "expected tag <%(expected_tag)s>; "
+ "actual tag <%(actual_tag)s>" %
+ {'path': self.path, 'idx': self.idx,
+ 'expected_tag': self.expected_tag,
+ 'actual_tag': self.actual_tag})
+
+
+class XMLAttrKeysMismatch(XMLMismatch):
+ """XML attribute keys don't match."""
+
+ def __init__(self, state, expected_only, actual_only):
+ super(XMLAttrKeysMismatch, self).__init__(state)
+ self.expected_only = ', '.join(sorted(expected_only))
+ self.actual_only = ', '.join(sorted(actual_only))
+
+ def describe(self):
+ return ("%(path)s: XML attributes mismatch: "
+ "keys only in expected: %(expected_only)s; "
+ "keys only in actual: %(actual_only)s" %
+ {'path': self.path, 'expected_only': self.expected_only,
+ 'actual_only': self.actual_only})
+
+
+class XMLAttrValueMismatch(XMLMismatch):
+ """XML attribute values don't match."""
+
+ def __init__(self, state, key, expected_value, actual_value):
+ super(XMLAttrValueMismatch, self).__init__(state)
+ self.key = key
+ self.expected_value = expected_value
+ self.actual_value = actual_value
+
+ def describe(self):
+ return ("%(path)s: XML attribute value mismatch: "
+ "expected value of attribute %(key)s: %(expected_value)r; "
+ "actual value: %(actual_value)r" %
+ {'path': self.path, 'key': self.key,
+ 'expected_value': self.expected_value,
+ 'actual_value': self.actual_value})
+
+
+class XMLTextValueMismatch(XMLMismatch):
+ """XML text values don't match."""
+
+ def __init__(self, state, expected_text, actual_text):
+ super(XMLTextValueMismatch, self).__init__(state)
+ self.expected_text = expected_text
+ self.actual_text = actual_text
+
+ def describe(self):
+ return ("%(path)s: XML text value mismatch: "
+ "expected text value: %(expected_text)r; "
+ "actual value: %(actual_text)r" %
+ {'path': self.path, 'expected_text': self.expected_text,
+ 'actual_text': self.actual_text})
+
+
+class XMLUnexpectedChild(XMLMismatch):
+ """Unexpected child present in XML."""
+
+ def __init__(self, state, tag, idx):
+ super(XMLUnexpectedChild, self).__init__(state)
+ self.tag = tag
+ self.idx = idx
+
+ def describe(self):
+ return ("%(path)s: XML unexpected child element <%(tag)s> "
+ "present at index %(idx)d" %
+ {'path': self.path, 'tag': self.tag, 'idx': self.idx})
+
+
+class XMLExpectedChild(XMLMismatch):
+ """Expected child not present in XML."""
+
+ def __init__(self, state, tag, idx):
+ super(XMLExpectedChild, self).__init__(state)
+ self.tag = tag
+ self.idx = idx
+
+ def describe(self):
+ return ("%(path)s: XML expected child element <%(tag)s> "
+ "not present at index %(idx)d" %
+ {'path': self.path, 'tag': self.tag, 'idx': self.idx})
+
+
+class XMLMatchState(object):
+ """Maintain some state for matching.
+
+ Tracks the XML node path and saves the expected and actual full
+ XML text, for use by the XMLMismatch subclasses.
+ """
+
+ def __init__(self, expected, actual):
+ self.path = []
+ self.expected = expected
+ self.actual = actual
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ self.path.pop()
+ return False
+
+ def __str__(self):
+ return '/' + '/'.join(self.path)
+
+ def node(self, tag, idx):
+ """Adds tag and index to the path; they will be popped off when
+ the corresponding 'with' statement exits.
+
+ :param tag: The element tag
+ :param idx: If not None, the integer index of the element
+ within its parent. Not included in the path
+ element if None.
+ """
+
+ if idx is not None:
+ self.path.append("%s[%d]" % (tag, idx))
+ else:
+ self.path.append(tag)
+ return self
+
+
+class XMLMatches(object):
+ """Compare XML strings. More complete than string comparison."""
+
+ def __init__(self, expected):
+ self.expected_xml = expected
+ self.expected = etree.fromstring(expected)
+
+ def __str__(self):
+ return 'XMLMatches(%r)' % self.expected_xml
+
+ def match(self, actual_xml):
+ actual = etree.fromstring(actual_xml)
+
+ state = XMLMatchState(self.expected_xml, actual_xml)
+ result = self._compare_node(self.expected, actual, state, None)
+
+ if result is False:
+ return XMLMismatch(state)
+ elif result is not True:
+ return result
+
+ def _compare_node(self, expected, actual, state, idx):
+ """Recursively compares nodes within the XML tree."""
+
+ # Start by comparing the tags
+ if expected.tag != actual.tag:
+ return XMLTagMismatch(state, idx, expected.tag, actual.tag)
+
+ with state.node(expected.tag, idx):
+ # Compare the attribute keys
+ expected_attrs = set(expected.attrib.keys())
+ actual_attrs = set(actual.attrib.keys())
+ if expected_attrs != actual_attrs:
+ expected_only = expected_attrs - actual_attrs
+ actual_only = actual_attrs - expected_attrs
+ return XMLAttrKeysMismatch(state, expected_only, actual_only)
+
+ # Compare the attribute values
+ for key in expected_attrs:
+ expected_value = expected.attrib[key]
+ actual_value = actual.attrib[key]
+
+ if 'DONTCARE' in (expected_value, actual_value):
+ continue
+ elif expected_value != actual_value:
+ return XMLAttrValueMismatch(state, key, expected_value,
+ actual_value)
+
+ # Compare the contents of the node
+ if len(expected) == 0 and len(actual) == 0:
+ # No children, compare text values
+ if ('DONTCARE' not in (expected.text, actual.text) and
+ expected.text != actual.text):
+ return XMLTextValueMismatch(state, expected.text,
+ actual.text)
+ else:
+ expected_idx = 0
+ actual_idx = 0
+ while (expected_idx < len(expected) and
+ actual_idx < len(actual)):
+ # Ignore comments and processing instructions
+ # TODO(Vek): may interpret PIs in the future, to
+ # allow for, say, arbitrary ordering of some
+ # elements
+ if (expected[expected_idx].tag in
+ (etree.Comment, etree.ProcessingInstruction)):
+ expected_idx += 1
+ continue
+
+ # Compare the nodes
+ result = self._compare_node(expected[expected_idx],
+ actual[actual_idx], state,
+ actual_idx)
+ if result is not True:
+ return result
+
+ # Step on to comparing the next nodes...
+ expected_idx += 1
+ actual_idx += 1
+
+ # Make sure we consumed all nodes in actual
+ if actual_idx < len(actual):
+ return XMLUnexpectedChild(state, actual[actual_idx].tag,
+ actual_idx)
+
+ # Make sure we consumed all nodes in expected
+ if expected_idx < len(expected):
+ for node in expected[expected_idx:]:
+ if (node.tag in
+ (etree.Comment, etree.ProcessingInstruction)):
+ continue
+
+ return XMLExpectedChild(state, node.tag, actual_idx)
+
+ # The nodes match
+ return True
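+
+ # Illustrative usage of XMLMatches (not part of the original change),
+ # assuming a testtools-based test case; attribute values may use DONTCARE:
+ #     self.assertThat('<a foo="1"><b>text</b></a>',
+ #                     matchers.XMLMatches('<a foo="DONTCARE"><b>text</b></a>'))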
diff --git a/nova/tests/unit/monkey_patch_example/__init__.py b/nova/tests/unit/monkey_patch_example/__init__.py
new file mode 100644
index 0000000000..bf0a9e4214
--- /dev/null
+++ b/nova/tests/unit/monkey_patch_example/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Example Module for testing utils.monkey_patch()."""
+
+
+CALLED_FUNCTION = []
+
+
+def example_decorator(name, function):
+ """decorator for notify which is used from utils.monkey_patch()
+
+ :param name: name of the function
+ :param function: - object of the function
+ :returns: function -- decorated function
+ """
+ def wrapped_func(*args, **kwarg):
+ CALLED_FUNCTION.append(name)
+ return function(*args, **kwarg)
+ return wrapped_func
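+
+
+# Illustrative sketch only (the nested names below are hypothetical): this is
+# the effect utils.monkey_patch() has when it wraps a callable with
+# example_decorator -- every call is recorded in CALLED_FUNCTION under the
+# name it was registered with.
+def _example_decorator_usage_sketch():
+    def greet():
+        return 'hello'
+
+    patched = example_decorator('monkey_patch_example.greet', greet)
+    patched()
+    # The recorded name can now be asserted on by the monkey_patch tests.
+    return 'monkey_patch_example.greet' in CALLED_FUNCTION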
diff --git a/nova/tests/unit/monkey_patch_example/example_a.py b/nova/tests/unit/monkey_patch_example/example_a.py
new file mode 100644
index 0000000000..3fdb4dcc05
--- /dev/null
+++ b/nova/tests/unit/monkey_patch_example/example_a.py
@@ -0,0 +1,27 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Example Module A for testing utils.monkey_patch()."""
+
+
+def example_function_a():
+ return 'Example function'
+
+
+class ExampleClassA():
+ def example_method(self):
+ return 'Example method'
+
+ def example_method_add(self, arg1, arg2):
+ return arg1 + arg2
diff --git a/nova/tests/unit/monkey_patch_example/example_b.py b/nova/tests/unit/monkey_patch_example/example_b.py
new file mode 100644
index 0000000000..2515fd2be4
--- /dev/null
+++ b/nova/tests/unit/monkey_patch_example/example_b.py
@@ -0,0 +1,28 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Example Module B for testing utils.monkey_patch()."""
+
+
+def example_function_b():
+ return 'Example function'
+
+
+class ExampleClassB():
+ def example_method(self):
+ return 'Example method'
+
+ def example_method_add(self, arg1, arg2):
+ return arg1 + arg2
diff --git a/nova/tests/unit/network/__init__.py b/nova/tests/unit/network/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/network/__init__.py
diff --git a/nova/tests/unit/network/security_group/__init__.py b/nova/tests/unit/network/security_group/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/network/security_group/__init__.py
diff --git a/nova/tests/unit/network/security_group/test_neutron_driver.py b/nova/tests/unit/network/security_group/test_neutron_driver.py
new file mode 100644
index 0000000000..6a86c6df1a
--- /dev/null
+++ b/nova/tests/unit/network/security_group/test_neutron_driver.py
@@ -0,0 +1,247 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+import mox
+from neutronclient.common import exceptions as n_exc
+from neutronclient.v2_0 import client
+
+from nova.api.openstack.compute.contrib import security_groups
+from nova import context
+from nova import exception
+from nova.network import neutronv2
+from nova.network.security_group import neutron_driver
+from nova import test
+
+
+class TestNeutronDriver(test.NoDBTestCase):
+ def setUp(self):
+ super(TestNeutronDriver, self).setUp()
+ self.mox.StubOutWithMock(neutronv2, 'get_client')
+ self.moxed_client = self.mox.CreateMock(client.Client)
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self.context = context.RequestContext('userid', 'my_tenantid')
+ setattr(self.context,
+ 'auth_token',
+ 'bff4a5a6b9eb4ea2a6efec6eefb77936')
+
+ def test_list_with_project(self):
+ project_id = '0af70a4d22cf4652824ddc1f2435dd85'
+ security_groups_list = {'security_groups': []}
+ self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
+ security_groups_list)
+ self.mox.ReplayAll()
+
+ sg_api = neutron_driver.SecurityGroupAPI()
+ sg_api.list(self.context, project=project_id)
+
+ def test_get_with_name_duplicated(self):
+ sg_name = 'web_server'
+ expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
+ list_security_groups = {'security_groups':
+ [{'name': sg_name,
+ 'id': expected_sg_id,
+ 'tenant_id': self.context.tenant,
+ 'description': 'server',
+ 'rules': []}
+ ]}
+ self.moxed_client.list_security_groups(name=sg_name, fields='id',
+ tenant_id=self.context.tenant).AndReturn(list_security_groups)
+
+ expected_sg = {'security_group': {'name': sg_name,
+ 'id': expected_sg_id,
+ 'tenant_id': self.context.tenant,
+ 'description': 'server', 'rules': []}}
+ self.moxed_client.show_security_group(expected_sg_id).AndReturn(
+ expected_sg)
+ self.mox.ReplayAll()
+
+ sg_api = neutron_driver.SecurityGroupAPI()
+ observed_sg = sg_api.get(self.context, name=sg_name)
+ expected_sg['security_group']['project_id'] = self.context.tenant
+ del expected_sg['security_group']['tenant_id']
+ self.assertEqual(expected_sg['security_group'], observed_sg)
+
+ def test_create_security_group_exceed_quota(self):
+ name = 'test-security-group'
+ description = 'test-security-group'
+ body = {'security_group': {'name': name,
+ 'description': description}}
+ message = "Quota exceeded for resources: ['security_group']"
+ self.moxed_client.create_security_group(
+ body).AndRaise(n_exc.NeutronClientException(status_code=409,
+ message=message))
+ self.mox.ReplayAll()
+ sg_api = security_groups.NativeNeutronSecurityGroupAPI()
+ self.assertRaises(exception.SecurityGroupLimitExceeded,
+ sg_api.create_security_group, self.context, name,
+ description)
+
+ def test_create_security_group_rules_exceed_quota(self):
+ vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
+ 'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
+ 'group_id': None, 'from_port': 1025, 'to_port': 1025}
+ body = {'security_group_rules': [{'remote_group_id': None,
+ 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
+ 'port_range_max': 1025, 'port_range_min': 1025,
+ 'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
+ 'remote_ip_prefix': '0.0.0.0/0'}]}
+ name = 'test-security-group'
+ message = "Quota exceeded for resources: ['security_group_rule']"
+ self.moxed_client.create_security_group_rule(
+ body).AndRaise(n_exc.NeutronClientException(status_code=409,
+ message=message))
+ self.mox.ReplayAll()
+ sg_api = security_groups.NativeNeutronSecurityGroupAPI()
+ self.assertRaises(exception.SecurityGroupLimitExceeded,
+ sg_api.add_rules, self.context, None, name, [vals])
+
+ def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
+ sg1 = {'description': 'default',
+ 'id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'name': 'default',
+ 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
+ 'security_group_rules':
+ [{'direction': 'ingress',
+ 'ethertype': 'IPv4',
+ 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
+ 'port_range_max': None,
+ 'port_range_min': None,
+ 'protocol': '51',
+ 'remote_group_id': None,
+ 'remote_ip_prefix': None,
+ 'security_group_id':
+ '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
+
+ self.moxed_client.list_security_groups().AndReturn(
+ {'security_groups': [sg1]})
+ self.mox.ReplayAll()
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.list(self.context)
+ expected = [{'rules':
+ [{'from_port': -1, 'protocol': '51', 'to_port': -1,
+ 'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'cidr': '0.0.0.0/0', 'group_id': None,
+ 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
+ 'project_id': 'c166d9316f814891bcb66b96c4c891d6',
+ 'id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'name': 'default', 'description': 'default'}]
+ self.assertEqual(expected, result)
+
+ def test_instances_security_group_bindings(self):
+ server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
+ port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
+ port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
+ sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
+ sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
+ servers = [{'id': server_id}]
+ ports = [{'id': port1_id, 'device_id': server_id,
+ 'security_groups': [sg1_id]},
+ {'id': port2_id, 'device_id': server_id,
+ 'security_groups': [sg2_id]}]
+ port_list = {'ports': ports}
+ sg1 = {'id': sg1_id, 'name': 'wol'}
+ sg2 = {'id': sg2_id, 'name': 'eor'}
+ security_groups_list = {'security_groups': [sg1, sg2]}
+
+ sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
+
+ self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
+ port_list)
+ self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
+ security_groups_list)
+ self.mox.ReplayAll()
+
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.get_instances_security_groups_bindings(
+ self.context, servers)
+ self.assertEqual(result, sg_bindings)
+
+ def _test_instances_security_group_bindings_scale(self, num_servers):
+ max_query = 150
+ sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
+ sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
+ sg1 = {'id': sg1_id, 'name': 'wol'}
+ sg2 = {'id': sg2_id, 'name': 'eor'}
+ security_groups_list = {'security_groups': [sg1, sg2]}
+ servers = []
+ device_ids = []
+ ports = []
+ sg_bindings = {}
+ for i in xrange(0, num_servers):
+ server_id = "server-%d" % i
+ port_id = "port-%d" % i
+ servers.append({'id': server_id})
+ device_ids.append(server_id)
+ ports.append({'id': port_id,
+ 'device_id': server_id,
+ 'security_groups': [sg1_id, sg2_id]})
+ sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
+
+ for x in xrange(0, num_servers, max_query):
+ self.moxed_client.list_ports(
+ device_id=device_ids[x:x + max_query]).\
+ AndReturn({'ports': ports[x:x + max_query]})
+
+ self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
+ security_groups_list)
+ self.mox.ReplayAll()
+
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.get_instances_security_groups_bindings(
+ self.context, servers)
+ self.assertEqual(result, sg_bindings)
+
+ def test_instances_security_group_bindings_less_than_max(self):
+ self._test_instances_security_group_bindings_scale(100)
+
+ def test_instances_security_group_bindings_max(self):
+ self._test_instances_security_group_bindings_scale(150)
+
+    def test_instances_security_group_bindings_more_than_max(self):
+ self._test_instances_security_group_bindings_scale(300)
+
+ def test_instances_security_group_bindings_with_hidden_sg(self):
+ servers = [{'id': 'server_1'}]
+ ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
+ {'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
+ port_list = {'ports': ports}
+ sg1 = {'id': '1', 'name': 'wol'}
+ # User doesn't have access to sg2
+ security_groups_list = {'security_groups': [sg1]}
+
+ sg_bindings = {'dev_1': [{'name': 'wol'}]}
+
+ self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
+ port_list)
+ self.moxed_client.list_security_groups(id=['1', '2']).AndReturn(
+ security_groups_list)
+ self.mox.ReplayAll()
+
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.get_instances_security_groups_bindings(
+ self.context, servers)
+ self.assertEqual(result, sg_bindings)
+
+ def test_instance_empty_security_groups(self):
+
+ port_list = {'ports': [{'id': 1, 'device_id': '1',
+ 'security_groups': []}]}
+ self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
+ self.mox.ReplayAll()
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.get_instance_security_groups(self.context, '1')
+ self.assertEqual([], result)
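+
+
+# Illustrative sketch only (hypothetical helper, not the driver code under
+# test): the chunking behaviour that
+# _test_instances_security_group_bindings_scale exercises above.  Ports are
+# requested in slices of at most max_query device ids rather than in one
+# oversized request.
+def _list_ports_in_chunks(neutron_client, device_ids, max_query=150):
+    ports = []
+    for start in xrange(0, len(device_ids), max_query):
+        chunk = device_ids[start:start + max_query]
+        ports.extend(neutron_client.list_ports(device_id=chunk)['ports'])
+    return ports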
diff --git a/nova/tests/unit/network/test_api.py b/nova/tests/unit/network/test_api.py
new file mode 100644
index 0000000000..efc7d29a4a
--- /dev/null
+++ b/nova/tests/unit/network/test_api.py
@@ -0,0 +1,589 @@
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for network API."""
+
+import contextlib
+import itertools
+
+import mock
+import mox
+
+from nova.compute import flavors
+from nova import context
+from nova import exception
+from nova import network
+from nova.network import api
+from nova.network import base_api
+from nova.network import floating_ips
+from nova.network import model as network_model
+from nova.network import rpcapi as network_rpcapi
+from nova import objects
+from nova.objects import fields
+from nova import policy
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_virtual_interface
+from nova import utils
+
+FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
+
+
+class NetworkPolicyTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkPolicyTestCase, self).setUp()
+
+ policy.reset()
+ policy.init()
+
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(NetworkPolicyTestCase, self).tearDown()
+ policy.reset()
+
+ def test_check_policy(self):
+ self.mox.StubOutWithMock(policy, 'enforce')
+ target = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ }
+ policy.enforce(self.context, 'network:get_all', target)
+ self.mox.ReplayAll()
+ api.check_policy(self.context, 'get_all')
+
+
+class ApiTestCase(test.TestCase):
+ def setUp(self):
+ super(ApiTestCase, self).setUp()
+ self.network_api = network.API()
+ self.context = context.RequestContext('fake-user',
+ 'fake-project')
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all(self, mock_get_all):
+ mock_get_all.return_value = mock.sentinel.get_all
+ self.assertEqual(mock.sentinel.get_all,
+ self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all_liberal(self, mock_get_all):
+        self.flags(network_manager='nova.network.manager.FlatDHCPManager')
+ mock_get_all.return_value = mock.sentinel.get_all
+ self.assertEqual(mock.sentinel.get_all,
+ self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only="allow_none")
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all_no_networks(self, mock_get_all):
+ mock_get_all.side_effect = exception.NoNetworksFound
+ self.assertEqual([], self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ def test_get(self, mock_get):
+ mock_get.return_value = mock.sentinel.get_by_uuid
+ with mock.patch.object(self.context, 'elevated') as elevated:
+ elevated.return_value = mock.sentinel.elevated_context
+ self.assertEqual(mock.sentinel.get_by_uuid,
+ self.network_api.get(self.context, 'fake-uuid'))
+ mock_get.assert_called_once_with(mock.sentinel.elevated_context,
+ 'fake-uuid')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_instance')
+ def test_get_vifs_by_instance(self, mock_get_by_instance,
+ mock_get_by_id):
+ mock_get_by_instance.return_value = [
+ dict(test_virtual_interface.fake_vif,
+ network_id=123)]
+ mock_get_by_id.return_value = objects.Network()
+ mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
+ instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
+ vifs = self.network_api.get_vifs_by_instance(self.context,
+ instance)
+ self.assertEqual(1, len(vifs))
+ self.assertEqual(123, vifs[0].network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
+ mock_get_by_instance.assert_called_once_with(
+ self.context, str(mock.sentinel.inst_uuid), use_slave=False)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_address')
+ def test_get_vif_by_mac_address(self, mock_get_by_address,
+ mock_get_by_id):
+ mock_get_by_address.return_value = dict(
+ test_virtual_interface.fake_vif, network_id=123)
+ mock_get_by_id.return_value = objects.Network(
+ uuid=mock.sentinel.network_uuid)
+ vif = self.network_api.get_vif_by_mac_address(self.context,
+ mock.sentinel.mac)
+ self.assertEqual(123, vif.network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
+ mock_get_by_address.assert_called_once_with(self.context,
+ mock.sentinel.mac)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'project_id',
+ 'requested_networks', 'rxtx_factor', 'vpn', 'macs',
+ 'dhcp_options'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ flavor = flavors.get_default_flavor()
+ flavor['rxtx_factor'] = 0
+ sys_meta = flavors.save_flavor_info({}, flavor)
+ instance = dict(id=1, uuid='uuid', project_id='project_id',
+ host='host', system_metadata=utils.dict_to_metadata(sys_meta))
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'], **instance)
+ self.network_api.allocate_for_instance(
+ self.context, instance, 'vpn', 'requested_networks', macs=macs)
+
+ def _do_test_associate_floating_ip(self, orig_instance_uuid):
+ """Test post-association logic."""
+
+ new_instance = {'uuid': 'new-uuid'}
+
+ def fake_associate(*args, **kwargs):
+ return orig_instance_uuid
+
+ self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
+ fake_associate)
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=None,
+ use_slave=None):
+ return fake_instance.fake_db_instance(uuid=instance_uuid)
+
+ self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+
+ def fake_get_nw_info(ctxt, instance):
+ class FakeNWInfo(object):
+ def json(self):
+ pass
+ return FakeNWInfo()
+
+ self.stubs.Set(self.network_api, '_get_instance_nw_info',
+ fake_get_nw_info)
+
+ if orig_instance_uuid:
+ expected_updated_instances = [new_instance['uuid'],
+ orig_instance_uuid]
+ else:
+ expected_updated_instances = [new_instance['uuid']]
+
+ def fake_instance_info_cache_update(context, instance_uuid, cache):
+ self.assertEqual(instance_uuid,
+ expected_updated_instances.pop())
+
+ self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
+ fake_instance_info_cache_update)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+ return
+
+ self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
+ self.network_api.associate_floating_ip(self.context,
+ new_instance,
+ '172.24.4.225',
+ '10.0.0.2')
+
+ def test_associate_preassociated_floating_ip(self):
+ self._do_test_associate_floating_ip('orig-uuid')
+
+ def test_associate_unassociated_floating_ip(self):
+ self._do_test_associate_floating_ip(None)
+
+ def test_get_floating_ip_invalid_id(self):
+ self.assertRaises(exception.InvalidID,
+ self.network_api.get_floating_ip,
+ self.context, '123zzz')
+
+ @mock.patch('nova.objects.FloatingIP.get_by_id')
+ def test_get_floating_ip(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip(self.context, 123))
+ mock_get.assert_called_once_with(self.context, 123)
+
+ @mock.patch('nova.objects.FloatingIP.get_pool_names')
+ def test_get_floating_ip_pools(self, mock_get):
+ pools = ['foo', 'bar']
+ mock_get.return_value = pools
+ self.assertEqual(pools,
+ self.network_api.get_floating_ip_pools(
+ self.context))
+
+ @mock.patch('nova.objects.FloatingIP.get_by_address')
+ def test_get_floating_ip_by_address(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip_by_address(
+ self.context, mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.address)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_project')
+ def test_get_floating_ips_by_project(self, mock_get):
+ floatings = mock.sentinel.floating_ips
+ mock_get.return_value = floatings
+ self.assertEqual(floatings,
+ self.network_api.get_floating_ips_by_project(
+ self.context))
+ mock_get.assert_called_once_with(self.context,
+ self.context.project_id)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+ def test_get_floating_ips_by_fixed_address(self, mock_get):
+ floatings = [objects.FloatingIP(id=1, address='1.2.3.4'),
+ objects.FloatingIP(id=2, address='5.6.7.8')]
+ mock_get.return_value = floatings
+ self.assertEqual(['1.2.3.4', '5.6.7.8'],
+ self.network_api.get_floating_ips_by_fixed_address(
+ self.context, mock.sentinel.fixed_address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.fixed_address)
+
+ def _stub_migrate_instance_calls(self, method, multi_host, info):
+ fake_flavor = flavors.get_default_flavor()
+ fake_flavor['rxtx_factor'] = 1.21
+ sys_meta = utils.dict_to_metadata(
+ flavors.save_flavor_info({}, fake_flavor))
+ fake_instance = {'uuid': 'fake_uuid',
+ 'instance_type_id': fake_flavor['id'],
+ 'project_id': 'fake_project_id',
+ 'system_metadata': sys_meta}
+ fake_migration = {'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest'}
+
+ def fake_mig_inst_method(*args, **kwargs):
+ info['kwargs'] = kwargs
+
+ def fake_get_multi_addresses(*args, **kwargs):
+ return multi_host, ['fake_float1', 'fake_float2']
+
+ self.stubs.Set(network_rpcapi.NetworkAPI, method,
+ fake_mig_inst_method)
+ self.stubs.Set(self.network_api, '_get_multi_addresses',
+ fake_get_multi_addresses)
+
+ expected = {'instance_uuid': 'fake_uuid',
+ 'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest',
+ 'rxtx_factor': 1.21,
+ 'project_id': 'fake_project_id',
+ 'floating_addresses': None}
+ if multi_host:
+ expected['floating_addresses'] = ['fake_float1', 'fake_float2']
+ return fake_instance, fake_migration, expected
+
+    def test_migrate_instance_start_with_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', True, info)
+ expected['host'] = 'fake_compute_source'
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+    def test_migrate_instance_start_without_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', False, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+    def test_migrate_instance_finish_with_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', True, info)
+ expected['host'] = 'fake_compute_dest'
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+    def test_migrate_instance_finish_without_multi_host(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', False, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_is_multi_host_instance_has_no_fixed_ip(self):
+ def fake_fixed_ip_get_by_instance(ctxt, uuid):
+ raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
+ self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
+ fake_fixed_ip_get_by_instance)
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertFalse(result)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
+ def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
+ fip_get):
+ network = objects.Network(
+ id=123, project_id=None,
+ multi_host=is_multi_host)
+ fip_get.return_value = [
+ objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
+ floating_ips=objects.FloatingIPList())]
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_is_multi_host_network_has_no_project_id_multi(self):
+ self._test_is_multi_host_network_has_no_project_id(True)
+
+ def test_is_multi_host_network_has_no_project_id_non_multi(self):
+ self._test_is_multi_host_network_has_no_project_id(False)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
+ def _test_is_multi_host_network_has_project_id(self, is_multi_host,
+ fip_get):
+ network = objects.Network(
+ id=123, project_id=self.context.project_id,
+ multi_host=is_multi_host)
+ fip_get.return_value = [
+ objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
+ floating_ips=objects.FloatingIPList())]
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_is_multi_host_network_has_project_id_multi(self):
+ self._test_is_multi_host_network_has_project_id(True)
+
+ def test_is_multi_host_network_has_project_id_non_multi(self):
+ self._test_is_multi_host_network_has_project_id(False)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_project(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ self.network_api.associate(self.context, FAKE_UUID, project=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=False, project=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_host(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ self.network_api.associate(self.context, FAKE_UUID, host=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=True, project=False)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.associate')
+ def test_network_associate_project(self, mock_associate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ project = mock.sentinel.project
+ self.network_api.associate(self.context, FAKE_UUID, project=project)
+ mock_associate.assert_called_once_with(self.context, project,
+ network_id=net_obj.id,
+ force=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.save')
+ def test_network_associate_host(self, mock_save, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ host = str(mock.sentinel.host)
+ self.network_api.associate(self.context, FAKE_UUID, host=host)
+ mock_save.assert_called_once_with()
+ self.assertEqual(host, net_obj.host)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate(self, mock_disassociate, mock_get):
+ mock_get.return_value = objects.Network(context=self.context, id=123)
+ self.network_api.disassociate(self.context, FAKE_UUID)
+ mock_disassociate.assert_called_once_with(self.context, 123,
+ project=True, host=True)
+
+ def _test_refresh_cache(self, method, *args, **kwargs):
+ # This test verifies that no call to get_instance_nw_info() is made
+ # from the @refresh_cache decorator for the tested method.
+ with contextlib.nested(
+ mock.patch.object(self.network_api.network_rpcapi, method),
+ mock.patch.object(self.network_api.network_rpcapi,
+ 'get_instance_nw_info'),
+ mock.patch.object(network_model.NetworkInfo, 'hydrate'),
+ ) as (
+ method_mock, nwinfo_mock, hydrate_mock
+ ):
+ nw_info = network_model.NetworkInfo([])
+ method_mock.return_value = nw_info
+ hydrate_mock.return_value = nw_info
+ getattr(self.network_api, method)(*args, **kwargs)
+ hydrate_mock.assert_called_once_with(nw_info)
+ self.assertFalse(nwinfo_mock.called)
+
+ def test_allocate_for_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ vpn = 'fake-vpn'
+ requested_networks = 'fake-networks'
+ self._test_refresh_cache('allocate_for_instance', self.context,
+ instance, vpn, requested_networks)
+
+ def test_add_fixed_ip_to_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ network_id = 'fake-network-id'
+ self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
+ instance, network_id)
+
+ def test_remove_fixed_ip_from_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ address = 'fake-address'
+ self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
+ instance, address)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ def test_get_fixed_ip_by_address(self, fip_get):
+ fip_get.return_value = test_fixed_ip.fake_fixed_ip
+ fip = self.network_api.get_fixed_ip_by_address(self.context,
+ 'fake-addr')
+ self.assertIsInstance(fip, objects.FixedIP)
+
+ @mock.patch('nova.objects.FixedIP.get_by_id')
+ def test_get_fixed_ip(self, mock_get_by_id):
+ mock_get_by_id.return_value = mock.sentinel.fixed_ip
+ self.assertEqual(mock.sentinel.fixed_ip,
+ self.network_api.get_fixed_ip(self.context,
+ mock.sentinel.id))
+ mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address(self, mock_get_by_floating):
+        mock_get_by_floating.return_value = objects.FixedIP(
+            instance_uuid=mock.sentinel.instance_uuid)
+ self.assertEqual(str(mock.sentinel.instance_uuid),
+ self.network_api.get_instance_id_by_floating_address(
+ self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
+ mock_get_by_floating.return_value = None
+ self.assertIsNone(
+ self.network_api.get_instance_id_by_floating_address(
+ self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
+
+@mock.patch('nova.network.api.API')
+@mock.patch('nova.db.instance_info_cache_update')
+class TestUpdateInstanceCache(test.TestCase):
+ def setUp(self):
+ super(TestUpdateInstanceCache, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {'uuid': FAKE_UUID}
+ vifs = [network_model.VIF(id='super_vif')]
+ self.nw_info = network_model.NetworkInfo(vifs)
+ self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
+ self.nw_info)
+
+ def test_update_nw_info_none(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance, None)
+ api_mock._get_instance_nw_info.assert_called_once_with(self.context,
+ self.instance)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+ def test_update_nw_info_one_network(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance, self.nw_info)
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+ def test_update_nw_info_empty_list(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance,
+ network_model.NetworkInfo([]))
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': '[]'})
+
+ def test_decorator_return_object(self, db_mock, api_mock):
+ @base_api.refresh_cache
+ def func(self, context, instance):
+ return network_model.NetworkInfo([])
+ func(api_mock, self.context, self.instance)
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': '[]'})
+
+ def test_decorator_return_none(self, db_mock, api_mock):
+ @base_api.refresh_cache
+ def func(self, context, instance):
+ pass
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ func(api_mock, self.context, self.instance)
+ api_mock._get_instance_nw_info.assert_called_once_with(self.context,
+ self.instance)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+
+class NetworkHooksTestCase(test.BaseHookTestCase):
+ def test_instance_network_info_hook(self):
+ info_func = base_api.update_instance_cache_with_nw_info
+ self.assert_has_hook('instance_network_info', info_func)
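+
+
+# Illustrative sketch only (not nova's actual decorator): the behaviour the
+# refresh_cache tests above rely on.  When the wrapped call already returns
+# a NetworkInfo it is cached as-is; otherwise _get_instance_nw_info() is
+# consulted before the instance's info cache is updated.
+def _refresh_cache_sketch(func):
+    def wrapper(api, context, instance, *args, **kwargs):
+        nw_info = func(api, context, instance, *args, **kwargs)
+        if not isinstance(nw_info, network_model.NetworkInfo):
+            nw_info = api._get_instance_nw_info(context, instance)
+        base_api.update_instance_cache_with_nw_info(api, context, instance,
+                                                    nw_info=nw_info)
+        return nw_info
+    return wrapper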
diff --git a/nova/tests/unit/network/test_linux_net.py b/nova/tests/unit/network/test_linux_net.py
new file mode 100644
index 0000000000..c07d43b2f3
--- /dev/null
+++ b/nova/tests/unit/network/test_linux_net.py
@@ -0,0 +1,1115 @@
+# Copyright 2011 NTT
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import calendar
+import contextlib
+import datetime
+import os
+
+import mock
+import mox
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import driver
+from nova.network import linux_net
+from nova import objects
+from nova.openstack.common import fileutils
+from nova.openstack.common import log as logging
+from nova import test
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('share_dhcp_address', 'nova.objects.network')
+CONF.import_opt('network_device_mtu', 'nova.objects.network')
+
+HOST = "testhost"
+
+instances = {'00000000-0000-0000-0000-0000000000000000':
+ {'id': 0,
+ 'uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'host': 'fake_instance00',
+ 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
+ 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
+ 'hostname': 'fake_instance00'},
+ '00000000-0000-0000-0000-0000000000000001':
+ {'id': 1,
+ 'uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'host': 'fake_instance01',
+ 'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
+ 'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
+ 'hostname': 'fake_instance01'}}
+
+
+addresses = [{"address": "10.0.0.1"},
+ {"address": "10.0.0.2"},
+ {"address": "10.0.0.3"},
+ {"address": "10.0.0.4"},
+ {"address": "10.0.0.5"},
+ {"address": "10.0.0.6"}]
+
+
+networks = [{'id': 0,
+ 'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
+ 'label': 'test0',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': '2001:db8::/64',
+ 'gateway_v6': '2001:db8::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'dhcp_start': '192.168.100.1',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.0.2',
+ 'mtu': None,
+ 'dhcp_server': '192.168.0.1',
+ 'enable_dhcp': True,
+ 'share_address': False},
+ {'id': 1,
+ 'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
+ 'label': 'test1',
+ 'injected': False,
+ 'multi_host': True,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '2001:db9::/64',
+ 'gateway_v6': '2001:db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'dhcp_start': '192.168.100.1',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.1.2',
+ 'mtu': None,
+ 'dhcp_server': '192.168.1.1',
+ 'enable_dhcp': True,
+ 'share_address': False}]
+
+
+fixed_ips = [{'id': 0,
+ 'network_id': 0,
+ 'address': '192.168.0.100',
+ 'instance_id': 0,
+ 'allocated': True,
+ 'leased': True,
+ 'virtual_interface_id': 0,
+ 'default_route': True,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'floating_ips': []},
+ {'id': 1,
+ 'network_id': 1,
+ 'address': '192.168.1.100',
+ 'instance_id': 0,
+ 'allocated': True,
+ 'leased': True,
+ 'virtual_interface_id': 1,
+ 'default_route': False,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'floating_ips': []},
+ {'id': 2,
+ 'network_id': 1,
+ 'address': '192.168.0.101',
+ 'instance_id': 1,
+ 'allocated': True,
+ 'leased': True,
+ 'virtual_interface_id': 2,
+ 'default_route': True,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'floating_ips': []},
+ {'id': 3,
+ 'network_id': 0,
+ 'address': '192.168.1.101',
+ 'instance_id': 1,
+ 'allocated': True,
+ 'leased': True,
+ 'virtual_interface_id': 3,
+ 'default_route': False,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'floating_ips': []},
+ {'id': 4,
+ 'network_id': 0,
+ 'address': '192.168.0.102',
+ 'instance_id': 0,
+ 'allocated': True,
+ 'leased': False,
+ 'virtual_interface_id': 4,
+ 'default_route': False,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'floating_ips': []},
+ {'id': 5,
+ 'network_id': 1,
+ 'address': '192.168.1.102',
+ 'instance_id': 1,
+ 'allocated': True,
+ 'leased': False,
+ 'virtual_interface_id': 5,
+ 'default_route': False,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'floating_ips': []},
+ {'id': 6,
+ 'network_id': 1,
+ 'address': '192.168.1.103',
+ 'instance_id': 1,
+ 'allocated': False,
+ 'leased': True,
+ 'virtual_interface_id': 6,
+ 'default_route': False,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'floating_ips': []}]
+
+
+vifs = [{'id': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'network_id': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
+ {'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:01',
+ 'uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'network_id': 1,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
+ {'id': 2,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:02',
+ 'uuid': '00000000-0000-0000-0000-0000000000000002',
+ 'network_id': 1,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
+ {'id': 3,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:03',
+ 'uuid': '00000000-0000-0000-0000-0000000000000003',
+ 'network_id': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
+ {'id': 4,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:04',
+ 'uuid': '00000000-0000-0000-0000-0000000000000004',
+ 'network_id': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
+ {'id': 5,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:05',
+ 'uuid': '00000000-0000-0000-0000-0000000000000005',
+ 'network_id': 1,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
+ {'id': 6,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:06',
+ 'uuid': '00000000-0000-0000-0000-0000000000000006',
+ 'network_id': 1,
+ 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}]
+
+
+def get_associated(context, network_id, host=None, address=None):
+ result = []
+ for datum in fixed_ips:
+ if (datum['network_id'] == network_id
+ and datum['instance_uuid'] is not None
+ and datum['virtual_interface_id'] is not None):
+ instance = instances[datum['instance_uuid']]
+ if host and host != instance['host']:
+ continue
+ if address and address != datum['address']:
+ continue
+ cleaned = {}
+ cleaned['address'] = datum['address']
+ cleaned['instance_uuid'] = datum['instance_uuid']
+ cleaned['network_id'] = datum['network_id']
+ cleaned['vif_id'] = datum['virtual_interface_id']
+ vif = vifs[datum['virtual_interface_id']]
+ cleaned['vif_address'] = vif['address']
+ cleaned['instance_hostname'] = instance['hostname']
+ cleaned['instance_updated'] = instance['updated_at']
+ cleaned['instance_created'] = instance['created_at']
+ cleaned['allocated'] = datum['allocated']
+ cleaned['leased'] = datum['leased']
+ cleaned['default_route'] = datum['default_route']
+ result.append(cleaned)
+ return result
+
+
+class LinuxNetworkTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(LinuxNetworkTestCase, self).setUp()
+ self.driver = driver.load_network_driver()
+ self.driver.db = db
+ self.context = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ def get_vifs(_context, instance_uuid, use_slave):
+ return [vif for vif in vifs if vif['instance_uuid'] ==
+ instance_uuid]
+
+ def get_instance(_context, instance_id):
+ return instances[instance_id]
+
+ self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
+ self.stubs.Set(db, 'instance_get', get_instance)
+ self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
+
+ def _test_add_snat_rule(self, expected, is_external):
+
+ def verify_add_rule(chain, rule):
+ self.assertEqual(chain, 'snat')
+ self.assertEqual(rule, expected)
+ self.called = True
+
+ self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
+ 'add_rule', verify_add_rule)
+ self.called = False
+ linux_net.add_snat_rule('10.0.0.0/24', is_external)
+ if expected:
+ self.assertTrue(self.called)
+
+ def test_add_snat_rule_no_ext(self):
+ self.flags(routing_source_ip='10.10.10.1')
+ expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
+ '-j SNAT --to-source 10.10.10.1 -o eth0')
+ self._test_add_snat_rule(expected, False)
+
+ def test_add_snat_rule_ext(self):
+ self.flags(routing_source_ip='10.10.10.1')
+ expected = ()
+ self._test_add_snat_rule(expected, True)
+
+ def test_add_snat_rule_snat_range_no_ext(self):
+ self.flags(routing_source_ip='10.10.10.1',
+ force_snat_range=['10.10.10.0/24'])
+ expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
+ '-j SNAT --to-source 10.10.10.1 -o eth0')
+ self._test_add_snat_rule(expected, False)
+
+ def test_add_snat_rule_snat_range_ext(self):
+ self.flags(routing_source_ip='10.10.10.1',
+ force_snat_range=['10.10.10.0/24'])
+ expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
+ '-j SNAT --to-source 10.10.10.1')
+ self._test_add_snat_rule(expected, True)
+
+ def test_update_dhcp_for_nw00(self):
+ self.flags(use_single_default_gateway=True)
+
+ self.mox.StubOutWithMock(self.driver, 'write_to_file')
+ self.mox.StubOutWithMock(fileutils, 'ensure_tree')
+ self.mox.StubOutWithMock(os, 'chmod')
+
+ self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
+ os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ self.driver.update_dhcp(self.context, "eth0", networks[0])
+
+ def test_update_dhcp_for_nw01(self):
+ self.flags(use_single_default_gateway=True)
+
+ self.mox.StubOutWithMock(self.driver, 'write_to_file')
+ self.mox.StubOutWithMock(fileutils, 'ensure_tree')
+ self.mox.StubOutWithMock(os, 'chmod')
+
+ self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ fileutils.ensure_tree(mox.IgnoreArg())
+ os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
+ os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ self.driver.update_dhcp(self.context, "eth0", networks[0])
+
+ def _get_fixedips(self, network, host=None):
+ return objects.FixedIPList.get_by_network(self.context,
+ network,
+ host=host)
+
+ def test_get_dhcp_hosts_for_nw00(self):
+ self.flags(use_single_default_gateway=True)
+
+ expected = (
+ "DE:AD:BE:EF:00:00,fake_instance00.novalocal,"
+ "192.168.0.100,net:NW-0\n"
+ "DE:AD:BE:EF:00:03,fake_instance01.novalocal,"
+ "192.168.1.101,net:NW-3\n"
+ "DE:AD:BE:EF:00:04,fake_instance00.novalocal,"
+ "192.168.0.102,net:NW-4"
+ )
+ fixedips = self._get_fixedips(networks[0])
+ actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0],
+ fixedips)
+
+ self.assertEqual(actual_hosts, expected)
+
+ def test_get_dhcp_hosts_for_nw01(self):
+ self.flags(use_single_default_gateway=True)
+
+ expected = (
+ "DE:AD:BE:EF:00:02,fake_instance01.novalocal,"
+ "192.168.0.101,net:NW-2\n"
+ "DE:AD:BE:EF:00:05,fake_instance01.novalocal,"
+ "192.168.1.102,net:NW-5"
+ )
+ fixedips = self._get_fixedips(networks[1], host='fake_instance01')
+ actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1],
+ fixedips)
+ self.assertEqual(actual_hosts, expected)
+
+ def test_get_dns_hosts_for_nw00(self):
+ expected = (
+ "192.168.0.100\tfake_instance00.novalocal\n"
+ "192.168.1.101\tfake_instance01.novalocal\n"
+ "192.168.0.102\tfake_instance00.novalocal"
+ )
+ actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])
+ self.assertEqual(actual_hosts, expected)
+
+ def test_get_dns_hosts_for_nw01(self):
+ expected = (
+ "192.168.1.100\tfake_instance00.novalocal\n"
+ "192.168.0.101\tfake_instance01.novalocal\n"
+ "192.168.1.102\tfake_instance01.novalocal"
+ )
+ actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])
+ self.assertEqual(actual_hosts, expected)
+
+ def test_get_dhcp_opts_for_nw00(self):
+ self.flags(use_single_default_gateway=True)
+ expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3'
+ fixedips = self._get_fixedips(networks[0])
+ actual_opts = self.driver.get_dhcp_opts(self.context, networks[0],
+ fixedips)
+
+ self.assertEqual(actual_opts, expected_opts)
+
+ def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self):
+ self.flags(use_single_default_gateway=False)
+ expected_opts = '3,192.168.0.1'
+ fixedips = self._get_fixedips(networks[0])
+ actual_opts = self.driver.get_dhcp_opts(self.context, networks[0],
+ fixedips)
+
+ self.assertEqual(actual_opts, expected_opts)
+
+ def test_get_dhcp_opts_for_nw01(self):
+ self.flags(use_single_default_gateway=True)
+ expected_opts = "NW-2,3,192.168.1.1\nNW-5,3"
+ fixedips = self._get_fixedips(networks[1], 'fake_instance01')
+ actual_opts = self.driver.get_dhcp_opts(self.context, networks[1],
+ fixedips)
+
+ self.assertEqual(actual_opts, expected_opts)
+
+ def test_get_dhcp_leases_for_nw00(self):
+ timestamp = timeutils.utcnow()
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ leases = self.driver.get_dhcp_leases(self.context, networks[0])
+ leases = leases.split('\n')
+ for lease in leases:
+ lease = lease.split(' ')
+ data = get_associated(self.context, 0, address=lease[2])[0]
+ self.assertTrue(data['allocated'])
+ self.assertTrue(data['leased'])
+ self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertEqual(data['vif_address'], lease[1])
+ self.assertEqual(data['address'], lease[2])
+ self.assertEqual(data['instance_hostname'], lease[3])
+ self.assertEqual('*', lease[4])
+
+ def test_get_dhcp_leases_for_nw01(self):
+ self.flags(host='fake_instance01')
+ timestamp = timeutils.utcnow()
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ leases = self.driver.get_dhcp_leases(self.context, networks[1])
+ leases = leases.split('\n')
+ for lease in leases:
+ lease = lease.split(' ')
+ data = get_associated(self.context, 1, address=lease[2])[0]
+ self.assertTrue(data['leased'])
+ self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertEqual(data['vif_address'], lease[1])
+ self.assertEqual(data['address'], lease[2])
+ self.assertEqual(data['instance_hostname'], lease[3])
+ self.assertEqual('*', lease[4])
+
+ def test_dhcp_opts_not_default_gateway_network(self):
+ expected = "NW-0,3"
+ fixedip = objects.FixedIPList.get_by_network(self.context,
+ {'id': 0})[0]
+ actual = self.driver._host_dhcp_opts(fixedip.virtual_interface_id)
+ self.assertEqual(actual, expected)
+
+ def test_host_dhcp_without_default_gateway_network(self):
+ expected = ','.join(['DE:AD:BE:EF:00:00',
+ 'fake_instance00.novalocal',
+ '192.168.0.100'])
+ fixedip = objects.FixedIPList.get_by_network(self.context,
+ {'id': 0})[0]
+ actual = self.driver._host_dhcp(fixedip)
+ self.assertEqual(actual, expected)
+
+ def test_host_dns_without_default_gateway_network(self):
+ expected = "192.168.0.100\tfake_instance00.novalocal"
+ fixedip = objects.FixedIPList.get_by_network(self.context,
+ {'id': 0})[0]
+ actual = self.driver._host_dns(fixedip)
+ self.assertEqual(actual, expected)
+
+ def test_linux_bridge_driver_plug(self):
+ """Makes sure plug doesn't drop FORWARD by default.
+
+ Ensures bug 890195 doesn't reappear.
+ """
+
+ def fake_execute(*args, **kwargs):
+ return "", ""
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def verify_add_rule(chain, rule):
+ self.assertEqual(chain, 'FORWARD')
+ self.assertIn('ACCEPT', rule)
+ self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
+ 'add_rule', verify_add_rule)
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+ driver.plug({"bridge": "br100", "bridge_interface": "eth0",
+ "share_address": False}, "fakemac")
+
+ def test_linux_ovs_driver_plug_exception(self):
+ self.flags(fake_network=False)
+
+ def fake_execute(*args, **kwargs):
+ raise processutils.ProcessExecutionError('error')
+
+ def fake_device_exists(*args, **kwargs):
+ return False
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(linux_net, 'device_exists', fake_device_exists)
+ driver = linux_net.LinuxOVSInterfaceDriver()
+ self.assertRaises(exception.AgentError,
+ driver.plug, {'uuid': 'fake_network_uuid'},
+ 'fake_mac')
+
+ def test_vlan_override(self):
+ """Makes sure vlan_interface flag overrides network bridge_interface.
+
+ Allows heterogeneous networks a la bug 833426
+ """
+
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+
+ info = {}
+
+ @staticmethod
+ def test_ensure(vlan, bridge, interface, network, mac_address, mtu):
+ info['passed_interface'] = interface
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'ensure_vlan_bridge', test_ensure)
+
+ network = {
+ "bridge": "br100",
+ "bridge_interface": "base_interface",
+ "share_address": False,
+ "vlan": "fake"
+ }
+ self.flags(vlan_interface="")
+ driver.plug(network, "fakemac")
+ self.assertEqual(info['passed_interface'], "base_interface")
+ self.flags(vlan_interface="override_interface")
+ driver.plug(network, "fakemac")
+ self.assertEqual(info['passed_interface'], "override_interface")
+ driver.plug(network, "fakemac")
+
+ def test_flat_override(self):
+ """Makes sure flat_interface flag overrides network bridge_interface.
+
+ Allows heterogeneous networks a la bug 833426
+ """
+
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+
+ info = {}
+
+ @staticmethod
+ def test_ensure(bridge, interface, network, gateway):
+ info['passed_interface'] = interface
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'ensure_bridge', test_ensure)
+
+ network = {
+ "bridge": "br100",
+ "bridge_interface": "base_interface",
+ "share_address": False,
+ }
+ driver.plug(network, "fakemac")
+ self.assertEqual(info['passed_interface'], "base_interface")
+ self.flags(flat_interface="override_interface")
+ driver.plug(network, "fakemac")
+ self.assertEqual(info['passed_interface'], "override_interface")
+
+ def _test_dnsmasq_execute(self, extra_expected=None):
+ network_ref = {'id': 'fake',
+ 'label': 'fake',
+ 'gateway': '10.0.0.1',
+ 'multi_host': False,
+ 'cidr': '10.0.0.0/24',
+ 'netmask': '255.255.255.0',
+ 'dns1': '8.8.4.4',
+ 'dhcp_start': '1.0.0.2',
+ 'dhcp_server': '10.0.0.1',
+ 'share_address': False}
+
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ return "", ""
+
+ def fake_add_dhcp_mangle_rule(*args, **kwargs):
+ executes.append(args)
+
+ self.stubs.Set(linux_net, '_execute', fake_execute)
+ self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
+ fake_add_dhcp_mangle_rule)
+
+ self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
+ self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
+ self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
+ dev = 'br100'
+
+ default_domain = CONF.dhcp_domain
+ for domain in ('', default_domain):
+ executes = []
+ self.flags(dhcp_domain=domain)
+ fixedips = self._get_fixedips(network_ref)
+ linux_net.restart_dhcp(self.context, dev, network_ref, fixedips)
+ expected = ['env',
+ 'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
+ 'NETWORK_ID=fake',
+ 'dnsmasq',
+ '--strict-order',
+ '--bind-interfaces',
+ '--conf-file=%s' % CONF.dnsmasq_config_file,
+ '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
+ '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'),
+ '--listen-address=%s' % network_ref['dhcp_server'],
+ '--except-interface=lo',
+ "--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'],
+ network_ref['dhcp_start'],
+ network_ref['netmask'],
+ CONF.dhcp_lease_time),
+ '--dhcp-lease-max=256',
+ '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
+ '--dhcp-script=%s' % CONF.dhcpbridge,
+ '--no-hosts',
+ '--leasefile-ro']
+
+ if CONF.dhcp_domain:
+ expected.append('--domain=%s' % CONF.dhcp_domain)
+
+ if extra_expected:
+ expected += extra_expected
+ self.assertEqual([(dev,), tuple(expected)], executes)
+
+ def test_dnsmasq_execute(self):
+ self._test_dnsmasq_execute()
+
+ def test_dnsmasq_execute_dns_servers(self):
+ self.flags(dns_server=['1.1.1.1', '2.2.2.2'])
+ expected = [
+ '--no-resolv',
+ '--server=1.1.1.1',
+ '--server=2.2.2.2',
+ ]
+ self._test_dnsmasq_execute(expected)
+
+ def test_dnsmasq_execute_use_network_dns_servers(self):
+ self.flags(use_network_dns_servers=True)
+ expected = [
+ '--no-resolv',
+ '--server=8.8.4.4',
+ ]
+ self._test_dnsmasq_execute(expected)
+
+ def test_isolated_host(self):
+ self.flags(fake_network=False,
+ share_dhcp_address=True)
+ # NOTE(vish): use a fresh copy of the manager for each test
+ self.stubs.Set(linux_net, 'iptables_manager',
+ linux_net.IptablesManager())
+ self.stubs.Set(linux_net, 'binary_name', 'test')
+ executes = []
+
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ return "", ""
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+
+ @staticmethod
+ def fake_ensure(bridge, interface, network, gateway):
+ return bridge
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'ensure_bridge', fake_ensure)
+
+ iface = 'eth0'
+ dhcp = '192.168.1.1'
+ network = {'dhcp_server': dhcp,
+ 'share_address': False,
+ 'bridge': 'br100',
+ 'bridge_interface': iface}
+ driver.plug(network, 'fakemac')
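+        # With share_dhcp_address enabled, plug() installs ebtables rules that
+        # drop ARP for the DHCP address and DHCP traffic on the interface.
+        # Each rule appears as a delete (-D) followed by an insert (-I) so
+        # duplicates don't pile up; the iptables-save/restore pairs at the end
+        # come from applying the fresh IptablesManager.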
+ expected = [
+ ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-i',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-o',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ('iptables-save', '-c'),
+ ('iptables-restore', '-c'),
+ ('ip6tables-save', '-c'),
+ ('ip6tables-restore', '-c'),
+ ]
+ self.assertEqual(executes, expected)
+
+ executes = []
+
+ @staticmethod
+ def fake_remove(bridge, gateway):
+ return
+
+ self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
+ 'remove_bridge', fake_remove)
+
+ driver.unplug(network)
+ expected = [
+ ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
+ iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
+ '-j', 'DROP'),
+ ]
+ self.assertEqual(executes, expected)
+
+ def _test_initialize_gateway(self, existing, expected, routes=''):
+ self.flags(fake_network=False)
+ executes = []
+
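+        # fake_execute records every command and feeds back the canned
+        # 'ip addr show' / 'ip route show' output; the sysctl read returns 1,
+        # so IP forwarding is treated as already enabled.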
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
+ return existing, ""
+ if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
+ return routes, ""
+ if args[0] == 'sysctl':
+ return '1\n', ''
+ self.stubs.Set(utils, 'execute', fake_execute)
+ network = {'dhcp_server': '192.168.1.1',
+ 'cidr': '192.168.1.0/24',
+ 'broadcast': '192.168.1.255',
+ 'cidr_v6': '2001:db8::/64'}
+ self.driver.initialize_gateway_device('eth0', network)
+ self.assertEqual(executes, expected)
+
+ def test_initialize_gateway_moves_wrong_ip(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('sysctl', '-n', 'net.ipv4.ip_forward'),
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
+ ('ip', 'addr', 'del', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.1.1/24',
+ 'brd', '192.168.1.255', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ]
+ self._test_initialize_gateway(existing, expected)
+
+ def test_initialize_gateway_resets_route(self):
+ routes = ("default via 192.168.0.1 dev eth0\n"
+ "192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('sysctl', '-n', 'net.ipv4.ip_forward'),
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
+ ('ip', 'route', 'del', 'default', 'dev', 'eth0'),
+ ('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
+ ('ip', 'addr', 'del', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.1.1/24',
+ 'brd', '192.168.1.255', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
+ 'dev', 'eth0'),
+ ('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
+ 'dev', 'eth0', 'proto', 'static'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ]
+ self._test_initialize_gateway(existing, expected, routes)
+
+ def test_initialize_gateway_no_move_right_ip(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
+ " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('sysctl', '-n', 'net.ipv4.ip_forward'),
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ]
+ self._test_initialize_gateway(existing, expected)
+
+ def test_initialize_gateway_add_if_blank(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('sysctl', '-n', 'net.ipv4.ip_forward'),
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', 'route', 'show', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.1.1/24',
+ 'brd', '192.168.1.255', 'dev', 'eth0'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ]
+ self._test_initialize_gateway(existing, expected)
+
+ def test_ensure_floating_no_duplicate_forwards(self):
+ ln = linux_net
+ self.stubs.Set(ln.iptables_manager, 'apply', lambda: None)
+ self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None)
+ net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
+ ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net)
+ ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net)
+ two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
+ ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net)
+ dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
+ self.assertEqual(two_forward_rules, dup_forward_rules)
+
+ def test_apply_ran(self):
+ manager = linux_net.IptablesManager()
+ manager.iptables_apply_deferred = False
+ self.mox.StubOutWithMock(manager, '_apply')
+ manager._apply()
+ self.mox.ReplayAll()
+ empty_ret = manager.apply()
+ self.assertIsNone(empty_ret)
+
+ def test_apply_not_run(self):
+ manager = linux_net.IptablesManager()
+ manager.iptables_apply_deferred = True
+ self.mox.StubOutWithMock(manager, '_apply')
+ self.mox.ReplayAll()
+ manager.apply()
+
+ def test_deferred_unset_apply_ran(self):
+ manager = linux_net.IptablesManager()
+ manager.iptables_apply_deferred = True
+ self.mox.StubOutWithMock(manager, '_apply')
+ manager._apply()
+ self.mox.ReplayAll()
+ manager.defer_apply_off()
+ self.assertFalse(manager.iptables_apply_deferred)
+
+ def _test_add_metadata_accept_rule(self, expected):
+ def verify_add_rule(chain, rule):
+ self.assertEqual(chain, 'INPUT')
+ self.assertEqual(expected, rule)
+
+ self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
+ 'add_rule', verify_add_rule)
+ linux_net.metadata_accept()
+
+ def test_metadata_accept(self):
+ self.flags(metadata_port='8775')
+ self.flags(metadata_host='10.10.10.1')
+ expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
+ '-d 10.10.10.1 -j ACCEPT')
+ self._test_add_metadata_accept_rule(expected)
+
+ def test_metadata_accept_localhost(self):
+ self.flags(metadata_port='8775')
+ self.flags(metadata_host='127.0.0.1')
+ expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
+ '-m addrtype --dst-type LOCAL -j ACCEPT')
+ self._test_add_metadata_accept_rule(expected)
+
+ def _test_add_metadata_forward_rule(self, expected):
+ def verify_add_rule(chain, rule):
+ self.assertEqual(chain, 'PREROUTING')
+ self.assertEqual(expected, rule)
+
+ self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
+ 'add_rule', verify_add_rule)
+ linux_net.metadata_forward()
+
+ def test_metadata_forward(self):
+ self.flags(metadata_port='8775')
+ self.flags(metadata_host='10.10.10.1')
+ expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
+ '--dport 80 -j DNAT --to-destination 10.10.10.1:8775')
+ self._test_add_metadata_forward_rule(expected)
+
+ def test_metadata_forward_localhost(self):
+ self.flags(metadata_port='8775')
+ self.flags(metadata_host='127.0.0.1')
+ expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
+ '--dport 80 -j REDIRECT --to-ports 8775')
+ self._test_add_metadata_forward_rule(expected)
+
+ def test_ensure_bridge_brings_up_interface(self):
+ calls = {
+ 'device_exists': [mock.call('bridge')],
+ '_execute': [
+ mock.call('brctl', 'addif', 'bridge', 'eth0',
+ run_as_root=True, check_exit_code=False),
+ mock.call('ip', 'link', 'set', 'eth0', 'up',
+ run_as_root=True, check_exit_code=False),
+ mock.call('ip', 'route', 'show', 'dev', 'eth0'),
+ mock.call('ip', 'addr', 'show', 'dev', 'eth0', 'scope',
+ 'global'),
+ ]
+ }
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists', return_value=True),
+ mock.patch.object(linux_net, '_execute', return_value=('', ''))
+ ) as (device_exists, _execute):
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+ driver.ensure_bridge('bridge', 'eth0')
+ device_exists.assert_has_calls(calls['device_exists'])
+ _execute.assert_has_calls(calls['_execute'])
+
+ def test_ensure_bridge_brclt_addif_exception(self):
+ def fake_execute(*cmd, **kwargs):
+ if ('brctl', 'addif', 'bridge', 'eth0') == cmd:
+ return ('', 'some error happens')
+ else:
+ return ('', '')
+
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists', return_value=True),
+ mock.patch.object(linux_net, '_execute', fake_execute)
+ ) as (device_exists, _):
+ driver = linux_net.LinuxBridgeInterfaceDriver()
+ self.assertRaises(exception.NovaException,
+ driver.ensure_bridge, 'bridge', 'eth0')
+ device_exists.assert_called_once_with('bridge')
+
+ def test_set_device_mtu_configured(self):
+ self.flags(network_device_mtu=10000)
+ calls = [
+ mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
+ 10000, run_as_root=True,
+ check_exit_code=[0, 2, 254])
+ ]
+ with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
+ linux_net._set_device_mtu('fake-dev')
+ ex.assert_has_calls(calls)
+
+ def test_set_device_mtu_default(self):
+ calls = []
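+        # network_device_mtu is left unset, so no 'ip link set ... mtu' call
+        # is expected.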
+ with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
+ linux_net._set_device_mtu('fake-dev')
+ ex.assert_has_calls(calls)
+
+ def _ovs_vif_port(self, calls):
+ with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
+ linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev',
+ 'fake-iface-id', 'fake-mac',
+ 'fake-instance-uuid')
+ ex.assert_has_calls(calls)
+
+ def test_ovs_vif_port(self):
+ calls = [
+ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
+ 'del-port', 'fake-dev', '--', 'add-port',
+ 'fake-bridge', 'fake-dev',
+ '--', 'set', 'Interface', 'fake-dev',
+ 'external-ids:iface-id=fake-iface-id',
+ 'external-ids:iface-status=active',
+ 'external-ids:attached-mac=fake-mac',
+ 'external-ids:vm-uuid=fake-instance-uuid',
+ run_as_root=True)
+ ]
+ self._ovs_vif_port(calls)
+
+ def test_ovs_vif_port_with_mtu(self):
+ self.flags(network_device_mtu=10000)
+ calls = [
+ mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
+ 'del-port', 'fake-dev', '--', 'add-port',
+ 'fake-bridge', 'fake-dev',
+ '--', 'set', 'Interface', 'fake-dev',
+ 'external-ids:iface-id=fake-iface-id',
+ 'external-ids:iface-status=active',
+ 'external-ids:attached-mac=fake-mac',
+ 'external-ids:vm-uuid=fake-instance-uuid',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
+ 10000, run_as_root=True,
+ check_exit_code=[0, 2, 254])
+ ]
+ self._ovs_vif_port(calls)
+
+ def _create_veth_pair(self, calls):
+ with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
+ linux_net._create_veth_pair('fake-dev1', 'fake-dev2')
+ ex.assert_has_calls(calls)
+
+ def test_create_veth_pair(self):
+ calls = [
+ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
+ 'peer', 'name', 'fake-dev2', run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
+ run_as_root=True)
+ ]
+ self._create_veth_pair(calls)
+
+ def test_create_veth_pair_with_mtu(self):
+ self.flags(network_device_mtu=10000)
+ calls = [
+ mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
+ 'peer', 'name', 'fake-dev2', run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev1', 'mtu',
+ 10000, run_as_root=True,
+ check_exit_code=[0, 2, 254]),
+ mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
+ run_as_root=True),
+ mock.call('ip', 'link', 'set', 'fake-dev2', 'mtu',
+ 10000, run_as_root=True,
+ check_exit_code=[0, 2, 254])
+ ]
+ self._create_veth_pair(calls)
diff --git a/nova/tests/unit/network/test_manager.py b/nova/tests/unit/network/test_manager.py
new file mode 100644
index 0000000000..776160cd0c
--- /dev/null
+++ b/nova/tests/unit/network/test_manager.py
@@ -0,0 +1,3358 @@
+# Copyright 2011 Rackspace
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import fixtures
+import mock
+import mox
+import netaddr
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+from oslo import messaging
+from oslo.utils import importutils
+import six
+
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova import ipv6
+from nova.network import floating_ips
+from nova.network import linux_net
+from nova.network import manager as network_manager
+from nova.network import model as net_model
+from nova import objects
+from nova.objects import quotas as quotas_obj
+from nova.objects import virtual_interface as vif_obj
+from nova.openstack.common import log as logging
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_ldap
+from nova.tests.unit import fake_network
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_floating_ip
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_service
+from nova import utils
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+HOST = "testhost"
+FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+
+
+fake_inst = fake_instance.fake_db_instance
+
+
+networks = [{'id': 0,
+ 'uuid': FAKEUUID,
+ 'label': 'test0',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': '2001:db8::/64',
+ 'gateway_v6': '2001:db8::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'dhcp_server': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.0.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'},
+ {'id': 1,
+ 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ 'label': 'test1',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '2001:db9::/64',
+ 'gateway_v6': '2001:db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.1.1',
+ 'dhcp_server': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.1.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'}]
+
+fixed_ips = [{'id': 0,
+ 'network_id': 0,
+ 'address': '192.168.0.100',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []},
+ {'id': 0,
+ 'network_id': 1,
+ 'address': '192.168.1.100',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []},
+ {'id': 0,
+ 'network_id': 1,
+ 'address': '2001:db9:0:1::10',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []}]
+
+
+flavor = {'id': 0,
+ 'rxtx_cap': 3}
+
+
+floating_ip_fields = {'id': 0,
+ 'address': '192.168.10.100',
+ 'pool': 'nova',
+ 'interface': 'eth0',
+ 'fixed_ip_id': 0,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+vifs = [{'id': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'network_id': 0,
+ 'instance_uuid': 0},
+ {'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:01',
+ 'uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'network_id': 1,
+ 'instance_uuid': 0},
+ {'id': 2,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:02',
+ 'uuid': '00000000-0000-0000-0000-0000000000000002',
+ 'network_id': 2,
+ 'instance_uuid': 0}]
+
+
+class FlatNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(FlatNetworkTestCase, self).setUp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.flags(log_dir=self.tempdir)
+ self.flags(use_local=True, group='conductor')
+ self.network = network_manager.FlatManager(host=HOST)
+ self.network.instance_dns_domain = ''
+ self.network.db = db
+ self.context = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def test_get_instance_nw_info(self):
+ fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
+
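+        # requesting zero networks should yield an empty (falsey) nw_info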
+ nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
+ self.assertFalse(nw_info)
+
+ nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
+
+ for i, vif in enumerate(nw_info):
+ nid = i + 1
+ check = {'bridge': 'fake_br%d' % nid,
+ 'cidr': '192.168.%s.0/24' % nid,
+ 'cidr_v6': '2001:db8:0:%x::/64' % nid,
+ 'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
+ 'multi_host': False,
+ 'injected': False,
+ 'bridge_interface': None,
+ 'vlan': None,
+ 'broadcast': '192.168.%d.255' % nid,
+ 'dhcp_server': '192.168.1.1',
+ 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
+ 'gateway': '192.168.%d.1' % nid,
+ 'gateway_v6': '2001:db8:0:1::1',
+ 'label': 'test%d' % nid,
+ 'mac': 'DE:AD:BE:EF:00:%02x' % nid,
+ 'rxtx_cap': 30,
+ 'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
+ 'vif_uuid':
+ '00000000-0000-0000-0000-00000000000000%02d' % nid,
+ 'ovs_interfaceid': None,
+ 'qbh_params': None,
+ 'qbg_params': None,
+ 'should_create_vlan': False,
+ 'should_create_bridge': False,
+ 'ip': '192.168.%d.%03d' % (nid, nid + 99),
+ 'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
+ 'netmask': '255.255.255.0',
+ 'netmask_v6': 64,
+ 'physical_network': None,
+ }
+
+ network = vif['network']
+ net_v4 = vif['network']['subnets'][0]
+ net_v6 = vif['network']['subnets'][1]
+
+ vif_dict = dict(bridge=network['bridge'],
+ cidr=net_v4['cidr'],
+ cidr_v6=net_v6['cidr'],
+ id=vif['id'],
+ multi_host=network.get_meta('multi_host', False),
+ injected=network.get_meta('injected', False),
+ bridge_interface=
+ network.get_meta('bridge_interface'),
+ vlan=network.get_meta('vlan'),
+ broadcast=str(net_v4.as_netaddr().broadcast),
+ dhcp_server=network.get_meta('dhcp_server',
+ net_v4['gateway']['address']),
+ dns=[ip['address'] for ip in net_v4['dns']],
+ gateway=net_v4['gateway']['address'],
+ gateway_v6=net_v6['gateway']['address'],
+ label=network['label'],
+ mac=vif['address'],
+ rxtx_cap=vif.get_meta('rxtx_cap'),
+ vif_type=vif['type'],
+ vif_devname=vif.get('devname'),
+ vif_uuid=vif['id'],
+ ovs_interfaceid=vif.get('ovs_interfaceid'),
+ qbh_params=vif.get('qbh_params'),
+ qbg_params=vif.get('qbg_params'),
+ should_create_vlan=
+ network.get_meta('should_create_vlan', False),
+ should_create_bridge=
+ network.get_meta('should_create_bridge',
+ False),
+ ip=net_v4['ips'][i]['address'],
+ ip_v6=net_v6['ips'][i]['address'],
+ netmask=str(net_v4.as_netaddr().netmask),
+ netmask_v6=net_v6.as_netaddr()._prefixlen,
+ physical_network=
+ network.get_meta('physical_network', None))
+
+ self.assertThat(vif_dict, matchers.DictMatches(check))
+
+ def test_validate_networks(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ '192.168.1.100'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '192.168.0.100')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+
+ ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
+ ip['network'] = dict(test_network.fake_network,
+ **networks[1])
+ ip['instance_uuid'] = None
+ db.fixed_ip_get_by_address(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ columns_to_join=mox.IgnoreArg()
+ ).AndReturn(ip)
+ ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
+ ip['network'] = dict(test_network.fake_network,
+ **networks[0])
+ ip['instance_uuid'] = None
+ db.fixed_ip_get_by_address(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ columns_to_join=mox.IgnoreArg()
+ ).AndReturn(ip)
+
+ self.mox.ReplayAll()
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_networks_valid_fixed_ipv6(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ '2001:db9:0:1::10')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **networks[1])])
+
+ ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
+ ip['network'] = dict(test_network.fake_network,
+ **networks[1])
+ ip['instance_uuid'] = None
+ db.fixed_ip_get_by_address(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ columns_to_join=mox.IgnoreArg()
+ ).AndReturn(ip)
+
+ self.mox.ReplayAll()
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_reserved(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ nets = self.network.create_networks(context_admin, 'fake',
+ '192.168.0.0/24', False, 1,
+ 256, None, None, None, None, None)
+ self.assertEqual(1, len(nets))
+ network = nets[0]
+ self.assertEqual(4, db.network_count_reserved_ips(context_admin,
+ network['id']))
+
+ def test_validate_reserved_start_end(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ nets = self.network.create_networks(context_admin, 'fake',
+ '192.168.0.0/24', False, 1,
+ 256, dhcp_server='192.168.0.11',
+ allowed_start='192.168.0.10',
+ allowed_end='192.168.0.245')
+ self.assertEqual(1, len(nets))
+ network = nets[0]
+        # the gateway defaults to allowed_start (the first allowed address)
+ self.assertEqual('192.168.0.10', network['gateway'])
+ # vpn_server doesn't conflict with dhcp_start
+ self.assertEqual('192.168.0.12', network['vpn_private_address'])
+ # dhcp_start doesn't conflict with dhcp_server
+ self.assertEqual('192.168.0.13', network['dhcp_start'])
+ # NOTE(vish): 10 from the beginning, 10 from the end, and
+ # 1 for the gateway, 1 for the dhcp server,
+ # 1 for the vpn server
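+        #             i.e. 10 + 10 + 1 + 1 + 1 = 23 reserved addresses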
+ self.assertEqual(23, db.network_count_reserved_ips(context_admin,
+ network['id']))
+
+ def test_validate_reserved_start_out_of_range(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.AddressOutOfRange,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 256, allowed_start='192.168.1.10')
+
+ def test_validate_reserved_end_invalid(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidAddress,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 256, allowed_end='invalid')
+
+ def test_validate_cidr_invalid(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidCidr,
+ self.network.create_networks,
+ context_admin, 'fake', 'invalid', False,
+ 1, 256)
+
+ def test_validate_non_int_size(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidIntValue,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 'invalid')
+
+ def test_validate_networks_none_requested_networks(self):
+ self.network.validate_networks(self.context, None)
+
+ def test_validate_networks_empty_requested_networks(self):
+ requested_networks = []
+ self.mox.ReplayAll()
+
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_networks_invalid_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ '192.168.1.100.1'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '192.168.0.100.1')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FixedIpInvalid,
+ self.network.validate_networks, self.context,
+ requested_networks)
+
+ def test_validate_networks_empty_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ ''),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FixedIpInvalid,
+ self.network.validate_networks,
+ self.context, requested_networks)
+
+ def test_validate_networks_none_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ None),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ None)]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+
+ self.network.validate_networks(self.context, requested_networks)
+
+ @mock.patch('nova.objects.quotas.Quotas.reserve')
+ def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ self.mox.StubOutWithMock(db, 'network_get')
+ self.mox.StubOutWithMock(db, 'network_update')
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.101')
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None).AndReturn(fixed)
+
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
+ db.instance_get_by_uuid(self.context,
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(inst)
+
+ db.network_get(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ project_only=mox.IgnoreArg()
+ ).AndReturn(dict(test_network.fake_network,
+ **networks[0]))
+ db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
+ networks[0]['id'])
+ exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
+ inst)
+ reserve.assert_called_once_with(self.context, fixed_ips=1,
+ project_id=exp_project,
+ user_id=exp_user)
+
+ @mock.patch('nova.objects.quotas.Quotas.reserve')
+ def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ self.mox.StubOutWithMock(db, 'network_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'network_update')
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.101')
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None).AndReturn(fixed)
+
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
+ db.instance_get_by_uuid(self.context,
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(inst)
+
+ db.network_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg()
+ ).AndReturn(dict(test_network.fake_network,
+ **networks[0]))
+ db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
+ networks[0]['uuid'])
+ exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
+ inst)
+ reserve.assert_called_once_with(self.context, fixed_ips=1,
+ project_id=exp_project,
+ user_id=exp_user)
+
+ def test_mini_dns_driver(self):
+ zone1 = "example.org"
+ zone2 = "example.com"
+ driver = self.network.instance_dns_manager
+ driver.create_entry("hostone", "10.0.0.1", "A", zone1)
+ driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
+ driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
+ driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
+ driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
+
+ driver.delete_entry("hostone", zone1)
+ driver.modify_address("hostfour", "10.0.0.1", zone1)
+ driver.modify_address("hostthree", "10.0.0.1", zone1)
+ names = driver.get_entries_by_address("10.0.0.1", zone1)
+ self.assertEqual(len(names), 2)
+ self.assertIn('hostthree', names)
+ self.assertIn('hostfour', names)
+
+ names = driver.get_entries_by_address("10.0.0.5", zone2)
+ self.assertEqual(len(names), 1)
+ self.assertIn('hostfive', names)
+
+ addresses = driver.get_entries_by_name("hosttwo", zone1)
+ self.assertEqual(len(addresses), 1)
+ self.assertIn('10.0.0.2', addresses)
+
+ self.assertRaises(exception.InvalidInput,
+ driver.create_entry,
+ "hostname",
+ "10.10.10.10",
+ "invalidtype",
+ zone1)
+
+ def test_mini_dns_driver_with_mixed_case(self):
+ zone1 = "example.org"
+ driver = self.network.instance_dns_manager
+ driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
+ addresses = driver.get_entries_by_address("10.0.0.10", zone1)
+ self.assertEqual(len(addresses), 1)
+ for n in addresses:
+ driver.delete_entry(n, zone1)
+ addresses = driver.get_entries_by_address("10.0.0.10", zone1)
+ self.assertEqual(len(addresses), 0)
+
+ @mock.patch('nova.objects.quotas.Quotas.reserve')
+ def test_instance_dns(self, reserve):
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ fixedip = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.101')
+ self.mox.StubOutWithMock(db, 'network_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'network_update')
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None
+ ).AndReturn(fixedip)
+
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
+ db.instance_get_by_uuid(self.context,
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(inst)
+
+ db.network_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg()
+ ).AndReturn(dict(test_network.fake_network,
+ **networks[0]))
+ db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
+ networks[0]['uuid'])
+
+ instance_manager = self.network.instance_dns_manager
+ addresses = instance_manager.get_entries_by_name(HOST,
+ self.network.instance_dns_domain)
+ self.assertEqual(len(addresses), 1)
+ self.assertEqual(addresses[0], fixedip['address'])
+ addresses = instance_manager.get_entries_by_name(FAKEUUID,
+ self.network.instance_dns_domain)
+ self.assertEqual(len(addresses), 1)
+ self.assertEqual(addresses[0], fixedip['address'])
+ exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
+ inst)
+ reserve.assert_called_once_with(self.context, fixed_ips=1,
+ project_id=exp_project,
+ user_id=exp_user)
+
+ def test_allocate_floating_ip(self):
+ self.assertIsNone(self.network.allocate_floating_ip(self.context,
+ 1, None))
+
+ def test_deallocate_floating_ip(self):
+ self.assertIsNone(self.network.deallocate_floating_ip(self.context,
+ 1, None))
+
+ def test_associate_floating_ip(self):
+ self.assertIsNone(self.network.associate_floating_ip(self.context,
+ None, None))
+
+ def test_disassociate_floating_ip(self):
+ self.assertIsNone(self.network.disassociate_floating_ip(self.context,
+ None, None))
+
+ def test_get_networks_by_uuids_ordering(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+
+ self.mox.ReplayAll()
+ res = self.network._get_networks_by_uuids(self.context,
+ requested_networks)
+
+ self.assertEqual(res[0]['id'], 1)
+ self.assertEqual(res[1]['id'], 0)
+
+ @mock.patch('nova.objects.instance.Instance.get_by_uuid')
+ @mock.patch('nova.objects.quotas.Quotas.reserve')
+ @mock.patch('nova.objects.quotas.ids_from_instance')
+ def test_allocate_calculates_quota_auth(self, util_method, reserve,
+ get_by_uuid):
+ inst = objects.Instance()
+ inst['uuid'] = 'nosuch'
+ get_by_uuid.return_value = inst
+ reserve.side_effect = exception.OverQuota(overs='testing',
+ quotas={'fixed_ips': 10},
+ headroom={'fixed_ips': 0})
+ util_method.return_value = ('foo', 'bar')
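+        # the OverQuota raised by reserve should surface as
+        # FixedIpLimitExceeded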
+ self.assertRaises(exception.FixedIpLimitExceeded,
+ self.network.allocate_fixed_ip,
+ self.context, 123, {'uuid': 'nosuch'})
+ util_method.assert_called_once_with(self.context, inst)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
+ @mock.patch('nova.objects.quotas.Quotas.reserve')
+ @mock.patch('nova.objects.quotas.ids_from_instance')
+ def test_deallocate_calculates_quota_auth(self, util_method, reserve,
+ get_by_address):
+ inst = objects.Instance(uuid='fake-uuid')
+ fip = objects.FixedIP(instance_uuid='fake-uuid',
+ virtual_interface_id=1)
+ get_by_address.return_value = fip
+ util_method.return_value = ('foo', 'bar')
+ # This will fail right after the reserve call when it tries
+ # to look up the fake instance we created above
+ self.assertRaises(exception.InstanceNotFound,
+ self.network.deallocate_fixed_ip,
+ self.context, '1.2.3.4', instance=inst)
+ util_method.assert_called_once_with(self.context, inst)
+
+ @mock.patch('nova.objects.instance.Instance.get_by_uuid')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
+ def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
+ mock_get):
+ mock_associate.side_effect = test.TestingException
+ instance = objects.Instance(context=self.context)
+ instance.create()
+ mock_get.return_value = instance
+ self.assertRaises(test.TestingException,
+ self.network.allocate_fixed_ip,
+ self.context, instance.uuid,
+ {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
+ address=netaddr.IPAddress('1.2.3.4'))
+ mock_associate.assert_called_once_with(self.context,
+ '1.2.3.4',
+ instance.uuid,
+ 1)
+
+ @mock.patch('nova.objects.instance.Instance.get_by_uuid')
+ @mock.patch('nova.objects.virtual_interface.VirtualInterface'
+ '.get_by_instance_and_network')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.save')
+ def test_allocate_fixed_ip_cleanup(self,
+ mock_fixedip_save,
+ mock_fixedip_associate,
+ mock_fixedip_disassociate,
+ mock_vif_get,
+ mock_instance_get):
+ address = netaddr.IPAddress('1.2.3.4')
+
+ fip = objects.FixedIP(instance_uuid='fake-uuid',
+ address=address,
+ virtual_interface_id=1)
+ mock_fixedip_associate.return_value = fip
+
+ instance = objects.Instance(context=self.context)
+ instance.create()
+ mock_instance_get.return_value = instance
+
+ mock_vif_get.return_value = vif_obj.VirtualInterface(
+ instance_uuid='fake-uuid', id=1)
+
+ with contextlib.nested(
+ mock.patch.object(self.network, '_setup_network_on_host'),
+ mock.patch.object(self.network, 'instance_dns_manager'),
+ mock.patch.object(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance')
+ ) as (mock_setup_network, mock_dns_manager, mock_ignored):
+ mock_setup_network.side_effect = test.TestingException
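+            # a failure in _setup_network_on_host should roll the allocation
+            # back: the DNS entries are deleted and the fixed IP is
+            # disassociated again (checked below).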
+ self.assertRaises(test.TestingException,
+ self.network.allocate_fixed_ip,
+ self.context, instance.uuid,
+ {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
+ address=address)
+
+ mock_dns_manager.delete_entry.assert_has_calls([
+ mock.call(instance.display_name, ''),
+ mock.call(instance.uuid, '')
+ ])
+
+ mock_fixedip_disassociate.assert_called_once_with(self.context)
+
+
+class FlatDHCPNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(FlatDHCPNetworkTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ self.flags(use_local=True, group='conductor')
+ self.network = network_manager.FlatDHCPManager(host=HOST)
+ self.network.db = db
+ self.context = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+ self.context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
+ @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
+ @mock.patch('nova.network.linux_net.iptables_manager._apply')
+ def test_init_host_iptables_defer_apply(self, iptable_apply,
+ floating_get_by_host,
+ fixed_get_by_id):
+ def get_by_id(context, fixed_ip_id, **kwargs):
+ net = objects.Network(bridge='testbridge',
+ cidr='192.168.1.0/24')
+ if fixed_ip_id == 1:
+ return objects.FixedIP(address='192.168.1.4',
+ network=net)
+ elif fixed_ip_id == 2:
+ return objects.FixedIP(address='192.168.1.5',
+ network=net)
+
+ def fake_apply():
+ fake_apply.count += 1
+
+ fake_apply.count = 0
+ ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
+ float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
+ float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
+ float1._context = ctxt
+ float2._context = ctxt
+
+ iptable_apply.side_effect = fake_apply
+ floating_get_by_host.return_value = [float1, float2]
+ fixed_get_by_id.side_effect = get_by_id
+
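+        # init_host defers iptables changes while restoring the floating IPs,
+        # so _apply should run exactly once at the end.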
+ self.network.init_host()
+ self.assertEqual(1, fake_apply.count)
+
+
+class VlanNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(VlanNetworkTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ self.flags(use_local=True, group='conductor')
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+ self.context = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+ self.context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ def test_quota_driver_type(self):
+ self.assertEqual(objects.QuotasNoOp,
+ self.network.quotas_cls)
+
+ def test_vpn_allocate_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.1')
+ db.fixed_ip_associate(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ network_id=mox.IgnoreArg(),
+ reserved=True).AndReturn(fixed)
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+ db.instance_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(fake_inst(display_name=HOST,
+ uuid=FAKEUUID))
+ self.mox.ReplayAll()
+
+ network = objects.Network._from_db_object(
+ self.context, objects.Network(),
+ dict(test_network.fake_network, **networks[0]))
+ network.vpn_private_address = '192.168.0.2'
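+        # with vpn=True the manager should claim the network's reserved vpn
+        # address, which is why fixed_ip_associate above expects reserved=True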
+ self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
+ vpn=True)
+
+ def test_vpn_allocate_fixed_ip_no_network_id(self):
+ network = dict(networks[0])
+ network['vpn_private_address'] = '192.168.0.2'
+ network['id'] = None
+ instance = db.instance_create(self.context, {})
+ self.assertRaises(exception.FixedIpNotFoundForNetwork,
+ self.network.allocate_fixed_ip,
+ self.context_admin,
+ instance['uuid'],
+ network,
+ vpn=True)
+
+ def test_allocate_fixed_ip(self):
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.1')
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None).AndReturn(fixed)
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+ db.instance_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(fake_inst(display_name=HOST,
+ uuid=FAKEUUID))
+ self.mox.ReplayAll()
+
+ network = objects.Network._from_db_object(
+ self.context, objects.Network(),
+ dict(test_network.fake_network, **networks[0]))
+ network.vpn_private_address = '192.168.0.2'
+ self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
+
+ @mock.patch('nova.objects.instance.Instance.get_by_uuid')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
+ def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
+ mock_get):
+ mock_associate.side_effect = test.TestingException
+ instance = objects.Instance(context=self.context)
+ instance.create()
+ mock_get.return_value = instance
+ self.assertRaises(test.TestingException,
+ self.network.allocate_fixed_ip,
+ self.context, instance.uuid,
+ {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
+ address=netaddr.IPAddress('1.2.3.4'))
+ mock_associate.assert_called_once_with(self.context,
+ '1.2.3.4',
+ instance.uuid,
+ 1)
+
+ @mock.patch('nova.objects.instance.Instance.get_by_uuid')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
+ def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
+ mock_get):
+ mock_associate.side_effect = test.TestingException
+ instance = objects.Instance(context=self.context)
+ instance.create()
+ mock_get.return_value = instance
+ self.assertRaises(test.TestingException,
+ self.network.allocate_fixed_ip,
+ self.context, instance.uuid,
+ {'cidr': '24', 'id': 1, 'uuid': 'nosuch',
+ 'vpn_private_address': netaddr.IPAddress('1.2.3.4')
+ }, vpn=1)
+ mock_associate.assert_called_once_with(self.context,
+ '1.2.3.4',
+ instance.uuid,
+ 1, reserved=True)
+
+ def test_create_networks_too_big(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=4094, vlan_start=1)
+
+ def test_create_networks_too_many(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=100, vlan_start=1,
+ cidr='192.168.0.1/24', network_size=100)
+
+ def test_duplicate_vlan_raises(self):
+        # VLAN 100 is already in use, so forcing the network onto that vlan
+        # (vlan=100) should raise DuplicateVlan.
+ self.assertRaises(exception.DuplicateVlan,
+ self.network.create_networks,
+ self.context_admin, label="fake", num_networks=1,
+ vlan=100, cidr='192.168.0.1/24', network_size=100)
+
+ def test_vlan_start(self):
+        # VLAN 100 and 101 are used, so this network should be created in 102
+ networks = self.network.create_networks(
+ self.context_admin, label="fake", num_networks=1,
+ vlan_start=100, cidr='192.168.3.1/24',
+ network_size=100)
+
+ self.assertEqual(networks[0]["vlan"], 102)
+
+ def test_vlan_start_multiple(self):
+        # VLAN 100 and 101 are used, so these networks should be created in
+        # 102 and 103
+ networks = self.network.create_networks(
+ self.context_admin, label="fake", num_networks=2,
+ vlan_start=100, cidr='192.168.3.1/24',
+ network_size=100)
+
+ self.assertEqual(networks[0]["vlan"], 102)
+ self.assertEqual(networks[1]["vlan"], 103)
+
+ def test_vlan_start_used(self):
+        # VLAN 100 and 101 are already in use, so even with vlan_start=99 the
+        # network should be created in 102.
+ networks = self.network.create_networks(
+ self.context_admin, label="fake", num_networks=1,
+ vlan_start=99, cidr='192.168.3.1/24',
+ network_size=100)
+
+ self.assertEqual(networks[0]["vlan"], 102)
+
+ def test_vlan_parameter(self):
+        # the vlan parameter cannot be greater than 4094
+ exc = self.assertRaises(ValueError,
+ self.network.create_networks,
+ self.context_admin, label="fake",
+ num_networks=1,
+ vlan=4095, cidr='192.168.0.1/24')
+ error_msg = 'The vlan number cannot be greater than 4094'
+ self.assertIn(error_msg, six.text_type(exc))
+
+        # the vlan parameter cannot be less than 1
+ exc = self.assertRaises(ValueError,
+ self.network.create_networks,
+ self.context_admin, label="fake",
+ num_networks=1,
+ vlan=0, cidr='192.168.0.1/24')
+ error_msg = 'The vlan number cannot be less than 1'
+ self.assertIn(error_msg, six.text_type(exc))
+
+ def test_vlan_be_integer(self):
+ # vlan must be an integer
+ exc = self.assertRaises(ValueError,
+ self.network.create_networks,
+ self.context_admin, label="fake",
+ num_networks=1,
+ vlan='fake', cidr='192.168.0.1/24')
+ error_msg = 'vlan must be an integer'
+ self.assertIn(error_msg, six.text_type(exc))
+
+ @mock.patch('nova.db.network_get')
+ def test_validate_networks(self, net_get):
+ def network_get(_context, network_id, project_only='allow_none'):
+ return dict(test_network.fake_network, **networks[network_id])
+
+ net_get.side_effect = network_get
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+ self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ '192.168.1.100'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '192.168.0.100')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+
+ db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=networks[1]['id'],
+ network=dict(test_network.fake_network,
+ **networks[1]),
+ instance_uuid=None)
+ db.fixed_ip_get_by_address(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ columns_to_join=mox.IgnoreArg()
+ ).AndReturn(db_fixed1)
+ db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=networks[0]['id'],
+ network=dict(test_network.fake_network,
+ **networks[0]),
+ instance_uuid=None)
+ db.fixed_ip_get_by_address(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ columns_to_join=mox.IgnoreArg()
+ ).AndReturn(db_fixed2)
+
+ self.mox.ReplayAll()
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_networks_none_requested_networks(self):
+ self.network.validate_networks(self.context, None)
+
+ def test_validate_networks_empty_requested_networks(self):
+ requested_networks = []
+ self.mox.ReplayAll()
+
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_networks_invalid_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ '192.168.1.100.1'),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '192.168.0.100.1')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FixedIpInvalid,
+ self.network.validate_networks, self.context,
+ requested_networks)
+
+ def test_validate_networks_empty_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FixedIpInvalid,
+ self.network.validate_networks,
+ self.context, requested_networks)
+
+ def test_validate_networks_none_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
+ ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+ self.mox.ReplayAll()
+ self.network.validate_networks(self.context, requested_networks)
+
+ def test_floating_ip_owned_by_project(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ # raises because floating_ip project_id is None
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=None)
+ self.assertRaises(exception.Forbidden,
+ self.network._floating_ip_owned_by_project,
+ ctxt,
+ floating_ip)
+
+ # raises because floating_ip project_id is not equal to ctxt project_id
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=ctxt.project_id + '1')
+ self.assertRaises(exception.Forbidden,
+ self.network._floating_ip_owned_by_project,
+ ctxt,
+ floating_ip)
+
+ # does not raise (floating ip is owned by ctxt project)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=ctxt.project_id)
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ ctxt = context.RequestContext(None, None,
+ is_admin=True)
+
+ # does not raise (ctxt is admin)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=None)
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ # does not raise (ctxt is admin)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id='testproject')
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ def test_allocate_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake_allocate_address(*args, **kwargs):
+ return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
+
+ self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
+ fake_allocate_address)
+
+ self.network.allocate_floating_ip(ctxt, ctxt.project_id)
+
+ @mock.patch('nova.quota.QUOTAS.reserve')
+ @mock.patch('nova.quota.QUOTAS.commit')
+ def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake1(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip)
+
+ def fake2(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1', fixed_ip_id=1)
+
+ def fake3(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1', fixed_ip_id=None,
+ project_id=ctxt.project_id)
+
+ self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
+ self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
+
+ # this time should raise because floating ip is associated to fixed_ip
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
+ self.assertRaises(exception.FloatingIpAssociated,
+ self.network.deallocate_floating_ip,
+ ctxt,
+ mox.IgnoreArg())
+
+ mock_reserve.return_value = 'reserve'
+ # this time should not raise
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
+ self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
+
+ mock_commit.assert_called_once_with(ctxt, 'reserve',
+ project_id='testproject')
+
+ @mock.patch('nova.db.fixed_ip_get')
+ def test_associate_floating_ip(self, fixed_get):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake1(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ network=test_network.fake_network)
+
+ # floating ip that's already associated
+ def fake2(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1)
+
+ # floating ip that isn't associated
+ def fake3(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=None)
+
+ # fixed ip with remote host
+ def fake4(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=123)
+
+ def fake4_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='jibberjabber')
+
+ # fixed ip with local host
+ def fake5(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=1234)
+
+ def fake5_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='testhost')
+
+ def fake6(ctxt, method, **kwargs):
+ self.local = False
+
+ def fake7(*args, **kwargs):
+ self.local = True
+
+ def fake8(*args, **kwargs):
+ raise processutils.ProcessExecutionError('',
+ 'Cannot find device "em0"\n')
+
+ def fake9(*args, **kwargs):
+ raise test.TestingException()
+
+ # raises because interface doesn't exist
+ self.stubs.Set(self.network.db,
+ 'floating_ip_fixed_ip_associate',
+ fake1)
+ self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
+ self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
+ self.assertRaises(exception.NoFloatingIpInterface,
+ self.network._associate_floating_ip,
+ ctxt,
+ '1.2.3.4',
+ '1.2.3.5',
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
+
+ # raises because floating_ip is already associated to a fixed_ip
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
+ self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
+
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ instance_uuid='fake_uuid',
+ network=test_network.fake_network)
+
+ # doesn't raise because we exit early if the address is the same
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
+
+ # raises because we call disassociate which is mocked
+ self.assertRaises(test.TestingException,
+ self.network.associate_floating_ip,
+ ctxt,
+ mox.IgnoreArg(),
+ 'new')
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
+
+ # does not raise and makes call remotely
+ self.local = True
+ self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
+ self.stubs.Set(self.network.db, 'network_get', fake4_network)
+ self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
+ lambda **kw: self.network.network_rpcapi.client)
+ self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.assertFalse(self.local)
+
+ # does not raise and makes call locally
+ self.local = False
+ self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
+ self.stubs.Set(self.network.db, 'network_get', fake5_network)
+ self.stubs.Set(self.network, '_associate_floating_ip', fake7)
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.assertTrue(self.local)
+
+ def test_add_floating_ip_nat_before_bind(self):
+        # Tried to verify the call order with mox's documented record/verify
+        # functionality, but it doesn't seem to work: the test can't be made
+        # to fail. Stubs and a flag are used for now, but if the mox feature
+        # can be made to work, it would be a better way to test this.
+ #
+ # self.mox.StubOutWithMock(self.network.driver,
+ # 'ensure_floating_forward')
+ # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
+ #
+ # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.mox.ReplayAll()
+
+ nat_called = [False]
+
+ def fake_nat(*args, **kwargs):
+ nat_called[0] = True
+
+ def fake_bind(*args, **kwargs):
+ self.assertTrue(nat_called[0])
+
+ self.stubs.Set(self.network.driver,
+ 'ensure_floating_forward',
+ fake_nat)
+ self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
+
+ self.network.l3driver.add_floating_ip('fakefloat',
+ 'fakefixed',
+ 'fakeiface',
+ 'fakenet')
+
+ @mock.patch('nova.db.floating_ip_get_all_by_host')
+ @mock.patch('nova.db.fixed_ip_get')
+ def _test_floating_ip_init_host(self, fixed_get, floating_get,
+ public_interface, expected_arg):
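+        # Helper: on init_host, floating IPs that resolve to a fixed IP
+        # should be re-plumbed via l3driver.add_floating_ip using either
+        # their own interface or the configured public_interface.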
+
+ floating_get.return_value = [
+ dict(test_floating_ip.fake_floating_ip,
+ interface='foo',
+ address='1.2.3.4'),
+ dict(test_floating_ip.fake_floating_ip,
+ interface='fakeiface',
+ address='1.2.3.5',
+ fixed_ip_id=1),
+ dict(test_floating_ip.fake_floating_ip,
+ interface='bar',
+ address='1.2.3.6',
+ fixed_ip_id=2),
+ ]
+
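+        # Only fixed_ip_id 1 resolves; the lookup for id 2 raises
+        # FixedIpNotFound, so that floating IP is skipped on init.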
+ def fixed_ip_get(_context, fixed_ip_id, get_network):
+ if fixed_ip_id == 1:
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ network=test_network.fake_network)
+ raise exception.FixedIpNotFound(id=fixed_ip_id)
+ fixed_get.side_effect = fixed_ip_get
+
+ self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
+ self.flags(public_interface=public_interface)
+ self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
+ netaddr.IPAddress('1.2.3.4'),
+ expected_arg,
+ mox.IsA(objects.Network))
+ self.mox.ReplayAll()
+ self.network.init_host_floating_ips()
+ self.mox.UnsetStubs()
+ self.mox.VerifyAll()
+
+ def test_floating_ip_init_host_without_public_interface(self):
+ self._test_floating_ip_init_host(public_interface=False,
+ expected_arg='fakeiface')
+
+ def test_floating_ip_init_host_with_public_interface(self):
+ self._test_floating_ip_init_host(public_interface='fooiface',
+ expected_arg='fooiface')
+
+ def test_disassociate_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake1(*args, **kwargs):
+ pass
+
+ # floating ip that isn't associated
+ def fake2(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=None)
+
+ # floating ip that is associated
+ def fake3(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1,
+ project_id=ctxt.project_id)
+
+ # fixed ip with remote host
+ def fake4(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=123)
+
+ def fake4_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False,
+ host='jibberjabber')
+
+ # fixed ip with local host
+ def fake5(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=1234)
+
+ def fake5_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='testhost')
+
+ def fake6(ctxt, method, **kwargs):
+ self.local = False
+
+ def fake7(*args, **kwargs):
+ self.local = True
+
+ def fake8(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1,
+ auto_assigned=True,
+ project_id=ctxt.project_id)
+
+ self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
+
+ # raises because floating_ip is not associated to a fixed_ip
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
+ self.assertRaises(exception.FloatingIpNotAssociated,
+ self.network.disassociate_floating_ip,
+ ctxt,
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
+
+ # does not raise and makes call remotely
+ self.local = True
+ self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
+ self.stubs.Set(self.network.db, 'network_get', fake4_network)
+ self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
+ lambda **kw: self.network.network_rpcapi.client)
+ self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
+ self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
+ self.assertFalse(self.local)
+
+ # does not raise and makes call locally
+ self.local = False
+ self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
+ self.stubs.Set(self.network.db, 'network_get', fake5_network)
+ self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
+ self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
+ self.assertTrue(self.local)
+
+ # raises because auto_assigned floating IP cannot be disassociated
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
+ self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
+ self.network.disassociate_floating_ip,
+ ctxt,
+ mox.IgnoreArg())
+
+ def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
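+        # add_fixed_ip_to_instance should pull an address from the pool
+        # for the given network and then refresh the instance's network
+        # info; the mox expectations below encode that call sequence.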
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ self.mox.StubOutWithMock(db, 'network_get')
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.101')
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None).AndReturn(fixed)
+ db.network_get(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ project_only=mox.IgnoreArg()
+ ).AndReturn(dict(test_network.fake_network,
+ **networks[0]))
+ db.instance_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(fake_inst(display_name=HOST,
+ uuid=FAKEUUID))
+ self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
+ networks[0]['id'])
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ def test_ip_association_and_allocation_of_other_project(self, net_get,
+ fixed_get):
+ """Makes sure that we cannot deallocaate or disassociate
+ a public ip of other project.
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ context1 = context.RequestContext('user', 'project1')
+ context2 = context.RequestContext('user', 'project2')
+
+ float_ip = db.floating_ip_create(context1.elevated(),
+ {'address': '1.2.3.4',
+ 'project_id': context1.project_id})
+
+ float_addr = float_ip['address']
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
+ 1, instance['uuid']).address
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+
+ # Associate the IP with non-admin user context
+ self.assertRaises(exception.Forbidden,
+ self.network.associate_floating_ip,
+ context2,
+ float_addr,
+ fix_addr)
+
+ # Deallocate address from other project
+ self.assertRaises(exception.Forbidden,
+ self.network.deallocate_floating_ip,
+ context2,
+ float_addr)
+
+        # Now associate the address with the owning project
+ self.network.associate_floating_ip(context1, float_addr, fix_addr)
+
+        # Now try disassociating it from the other project
+ self.assertRaises(exception.Forbidden,
+ self.network.disassociate_floating_ip,
+ context2,
+ float_addr)
+
+ # Clean up the ip addresses
+ self.network.disassociate_floating_ip(context1, float_addr)
+ self.network.deallocate_floating_ip(context1, float_addr)
+ self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
+ db.floating_ip_destroy(context1.elevated(), float_addr)
+ db.fixed_ip_disassociate(context1.elevated(), fix_addr)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
+ """Verify that release is called properly.
+
+        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't regress.
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return vifs[0]
+
+ self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ instance_uuid=instance.uuid,
+ allocated=True,
+ virtual_interface_id=3,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+
+ self.flags(force_dhcp_release=True)
+ self.mox.StubOutWithMock(linux_net, 'release_dhcp')
+ linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
+ 'DE:AD:BE:EF:00:00')
+ self.mox.ReplayAll()
+ self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed_with_dhcp_exception(self, fixed_update, net_get,
+ fixed_get):
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return vifs[0]
+
+ with contextlib.nested(
+ mock.patch.object(db, 'virtual_interface_get', vif_get),
+ mock.patch.object(
+ utils, 'execute',
+ side_effect=processutils.ProcessExecutionError()),
+ ) as (_vif_get, _execute):
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1,
+ instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ instance_uuid=instance.uuid,
+ allocated=True,
+ virtual_interface_id=3,
+ network=dict(
+ test_network.fake_network,
+ **networks[1]))
+ self.flags(force_dhcp_release=True)
+ self.network.deallocate_fixed_ip(context1, fix_addr.address,
+ 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+ _execute.assert_called_once_with('dhcp_release',
+ networks[1]['bridge'],
+ fix_addr.address,
+ 'DE:AD:BE:EF:00:00',
+ run_as_root=True)
+
+ def test_deallocate_fixed_deleted(self):
+        # Verify that a deleted fixed_ip on a deleted network is not
+        # deallocated.
+
+ def teardown_network_on_host(_context, network):
+ if network['id'] == 0:
+ raise test.TestingException()
+
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ teardown_network_on_host)
+
+ context1 = context.RequestContext('user', 'project1')
+ elevated = context1.elevated()
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+ network = db.network_create_safe(elevated, networks[0])
+
+ _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fix_addr = _fix_addr.address
+ db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
+ elevated.read_deleted = 'yes'
+ delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
+ values = {'address': fix_addr,
+ 'network_id': network.id,
+ 'instance_uuid': delfixed['instance_uuid']}
+ db.fixed_ip_create(elevated, values)
+        elevated.read_deleted = 'yes'
+
+ deallocate = self.network.deallocate_fixed_ip
+ self.assertRaises(test.TestingException, deallocate, context1,
+ fix_addr, 'fake')
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
+ """Verify that deallocate doesn't raise when no vif is returned.
+
+        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't regress.
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return None
+
+ self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ allocated=True,
+ virtual_interface_id=3,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+ self.flags(force_dhcp_release=True)
+ fixed_update.return_value = fixed_get.return_value
+ self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
+ # Verify IP is not deallocated if the security group refresh fails.
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = objects.FixedIP.associate_pool(elevated, 1,
+ instance['uuid'])
+
+ def fake_refresh(instance_uuid):
+ raise test.TestingException()
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ fake_refresh)
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ allocated=True,
+ virtual_interface_id=3,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+ self.assertRaises(test.TestingException,
+ self.network.deallocate_fixed_ip,
+ context1, str(fix_addr.address), 'fake')
+ self.assertFalse(fixed_update.called)
+
+ def test_get_networks_by_uuids_ordering(self):
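+        # _get_networks_by_uuids should return networks in the order they
+        # were requested, not in the order the DB query returns them.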
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+
+ self.mox.ReplayAll()
+ res = self.network._get_networks_by_uuids(self.context,
+ requested_networks)
+
+ self.assertEqual(res[0]['id'], 1)
+ self.assertEqual(res[1]['id'], 0)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
+ @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
+ @mock.patch('nova.network.linux_net.iptables_manager._apply')
+ def test_init_host_iptables_defer_apply(self, iptable_apply,
+ floating_get_by_host,
+ fixed_get_by_id):
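+        # With iptables deferred-apply, init_host should batch all of the
+        # floating IP plumbing into a single _apply call.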
+ def get_by_id(context, fixed_ip_id, **kwargs):
+ net = objects.Network(bridge='testbridge',
+ cidr='192.168.1.0/24')
+ if fixed_ip_id == 1:
+ return objects.FixedIP(address='192.168.1.4',
+ network=net)
+ elif fixed_ip_id == 2:
+ return objects.FixedIP(address='192.168.1.5',
+ network=net)
+
+ def fake_apply():
+ fake_apply.count += 1
+
+ fake_apply.count = 0
+ ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
+ float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
+ float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
+ float1._context = ctxt
+ float2._context = ctxt
+
+ iptable_apply.side_effect = fake_apply
+ floating_get_by_host.return_value = [float1, float2]
+ fixed_get_by_id.side_effect = get_by_id
+
+ self.network.init_host()
+ self.assertEqual(1, fake_apply.count)
+
+
+class _TestDomainObject(object):
+ def __init__(self, **kwargs):
+ for k, v in kwargs.iteritems():
+ self.__setattr__(k, v)
+
+
+class FakeNetwork(object):
+ def __init__(self, **kwargs):
+ self.vlan = None
+ for k, v in kwargs.iteritems():
+ self.__setattr__(k, v)
+
+ def __getitem__(self, item):
+ return getattr(self, item)
+
+
+class CommonNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CommonNetworkTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.flags(ipv6_backend='rfc2462')
+ self.flags(use_local=True, group='conductor')
+ ipv6.reset_backend()
+
+ def test_validate_instance_zone_for_dns_domain(self):
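+        # An instance whose availability zone matches the zone configured
+        # for its DNS domain should pass validation.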
+ domain = 'example.com'
+ az = 'test_az'
+ domains = {
+ domain: _TestDomainObject(
+ domain=domain,
+ availability_zone=az)}
+
+ def dnsdomain_get(context, instance_domain):
+ return domains.get(instance_domain)
+
+ self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
+ fake_instance = {'uuid': FAKEUUID,
+ 'availability_zone': az}
+
+ manager = network_manager.NetworkManager()
+ res = manager._validate_instance_zone_for_dns_domain(self.context,
+ fake_instance)
+ self.assertTrue(res)
+
+ def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
+ return None
+
+ def test_get_instance_nw_info_client_exceptions(self):
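+        # InstanceNotFound raised inside the manager should surface to RPC
+        # callers as an ExpectedException rather than an error traceback.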
+ manager = network_manager.NetworkManager()
+ self.mox.StubOutWithMock(manager.db,
+ 'fixed_ip_get_by_instance')
+ manager.db.fixed_ip_get_by_instance(
+ self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
+ instance_id=FAKEUUID))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ manager.get_instance_nw_info,
+ self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
+
+ @mock.patch('nova.db.instance_get')
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_deallocate_for_instance_passes_host_info(self, fixed_get,
+ instance_get):
+ manager = fake_network.FakeNetworkManager()
+ db = manager.db
+ instance_get.return_value = fake_inst(uuid='ignoreduuid')
+ db.virtual_interface_delete_by_instance = lambda _x, _y: None
+        ctx = context.RequestContext('ignore', 'ignore')
+
+ fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ network_id=123)]
+
+ manager.deallocate_for_instance(
+ ctx, instance=objects.Instance._from_db_object(self.context,
+ objects.Instance(), instance_get.return_value))
+
+ self.assertEqual([
+ (ctx, '1.2.3.4', 'fake-host')
+ ], manager.deallocate_fixed_ip_calls)
+
+ def test_deallocate_for_instance_with_requested_networks(self):
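+        # Only the fixed IPs named in requested_networks should be
+        # deallocated, each on the instance's host.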
+ manager = fake_network.FakeNetworkManager()
+ db = manager.db
+ db.virtual_interface_delete_by_instance = mock.Mock()
+        ctx = context.RequestContext('ignore', 'ignore')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest.from_tuple(t)
+ for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
+ manager.deallocate_for_instance(
+ ctx,
+ instance=fake_instance.fake_instance_obj(ctx),
+ requested_networks=requested_networks)
+
+ self.assertEqual([
+ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
+ ], manager.deallocate_fixed_ip_calls)
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_remove_fixed_ip_from_instance(self, disassociate, get):
+ manager = fake_network.FakeNetworkManager()
+ get.return_value = [
+ dict(test_fixed_ip.fake_fixed_ip, **x)
+ for x in manager.db.fixed_ip_get_by_instance(None,
+ FAKEUUID)]
+ manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
+ HOST,
+ '10.0.0.1')
+
+ self.assertEqual(manager.deallocate_called, '10.0.0.1')
+ disassociate.assert_called_once_with(self.context, '10.0.0.1')
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_remove_fixed_ip_from_instance_bad_input(self, get):
+ manager = fake_network.FakeNetworkManager()
+ get.return_value = []
+ self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
+ manager.remove_fixed_ip_from_instance,
+ self.context, 99, HOST, 'bad input')
+
+ def test_validate_cidrs(self):
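+        # Requesting a single network sized to the whole /24 should yield
+        # exactly that CIDR.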
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/24',
+ False, 1, 256, None, None, None,
+ None, None)
+ self.assertEqual(1, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/24', cidrs)
+
+ def test_validate_cidrs_split_exact_in_half(self):
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/24',
+ False, 2, 128, None, None, None,
+ None, None)
+ self.assertEqual(2, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/25', cidrs)
+ self.assertIn('192.168.0.128/25', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
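+        # Carving four /24s out of 192.168.0.0/16 should skip the
+        # 192.168.2.0/24 that is already in use in the middle of the range.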
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.0/24')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 4, 256, None, None, None,
+ None, None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/24', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.9/25')]
+ # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
+ # existing smaller cidr
+ args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
+ 1, 256, None, None, None, None, None)
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.0/25')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 4, 256, None, None, None, None,
+ None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/24', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ get_all.return_value = [dict(test_network.fake_network, id=1,
+ cidr='192.168.2.9/29')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.2.0/24',
+ False, 3, 32, None, None, None, None,
+ None)
+ self.assertEqual(3, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/27', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_all_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ in_use = [dict(test_network.fake_network, **values) for values in
+ [{'id': 1, 'cidr': '192.168.2.9/29'},
+ {'id': 2, 'cidr': '192.168.2.64/26'},
+ {'id': 3, 'cidr': '192.168.2.128/26'}]]
+ get_all.return_value = in_use
+ args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
+ 3, 64, None, None, None, None, None)
+        # CidrConflict: not enough subnets available to satisfy the
+        # requested num_networks - some subnets in the requested range
+        # are already in use
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_validate_cidrs_one_in_use(self):
+ manager = fake_network.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
+ None, None, None)
+ # ValueError: network_size * num_networks exceeds cidr size
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_already_used(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ cidr='192.168.0.0/24')]
+ # CidrConflict: cidr already in use
+ args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
+ 1, 256, None, None, None, None, None)
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_validate_cidrs_too_many(self):
+ manager = fake_network.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
+ None, None, None)
+        # ValueError: not enough subnets available to satisfy the
+        # requested num_networks
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_split_partial(self):
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 2, 256, None, None, None, None,
+ None)
+ returned_cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/24', returned_cidrs)
+ self.assertIn('192.168.1.0/24', returned_cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_conflict_existing_supernet(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.0.0/8')]
+ args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
+ 1, 256, None, None, None, None, None)
+ # CidrConflict: requested cidr (192.168.0.0/24) conflicts
+ # with existing supernet
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_create_networks(self):
+ cidr = '192.168.0.0/24'
+ manager = fake_network.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertTrue(manager.create_networks(*args))
+
+ @mock.patch('nova.db.network_get_all')
+ def test_create_networks_cidr_already_used(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.0.0/24')]
+ args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_create_networks_many(self):
+ cidr = '192.168.0.0/16'
+ manager = fake_network.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertTrue(manager.create_networks(*args))
+
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
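+        # The 'ip' filter is applied as a regex against the instances'
+        # fixed IPv4 addresses.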
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+ network_get.return_value = dict(test_network.fake_network,
+ **manager.db.network_get(None, 1))
+
+        # Greedily get everything
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '.*'})
+ self.assertEqual(len(res), len(_vifs))
+
+ # Doesn't exist
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '10.0.0.1'})
+ self.assertFalse(res)
+
+ # Get instance 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '172.16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '173.16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ # Get instance 0 and 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '172.16.0.*'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 1 and 2
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '17..16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get')
+ def test_get_instance_uuids_by_ipv6_regex(self, network_get):
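+        # The 'ip6' filter is applied as a regex against the instances'
+        # IPv6 addresses.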
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+
+ def _network_get(context, network_id, **args):
+ return dict(test_network.fake_network,
+ **manager.db.network_get(context, network_id))
+ network_get.side_effect = _network_get
+
+        # Greedily get everything
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*'})
+ self.assertEqual(len(res), len(_vifs))
+
+ # Doesn't exist
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*1034.*'})
+ self.assertFalse(res)
+
+ # Get instance 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '2001:.*2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': ip6})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ # Get instance 0 and 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*ef0[1,2]'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 1 and 2
+ ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': ip6})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
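+        # The 'fixed_ip' filter only matches exact addresses; regex
+        # patterns return no results.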
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+ network_get.return_value = dict(test_network.fake_network,
+ **manager.db.network_get(None, 1))
+
+ # No regex for you!
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': '.*'})
+ self.assertFalse(res)
+
+ # Doesn't exist
+ ip = '10.0.0.1'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertFalse(res)
+
+ # Get instance 1
+ ip = '172.16.0.2'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ ip = '173.16.0.2'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_network(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.return_value = dict(test_network.fake_network, **networks[0])
+ uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ network = manager.get_network(fake_context, uuid)
+ self.assertEqual(network['uuid'], uuid)
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_network_not_found(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
+ uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.assertRaises(exception.NetworkNotFound,
+ manager.get_network, fake_context, uuid)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_get_all_networks(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get_all.return_value = [dict(test_network.fake_network, **net)
+ for net in networks]
+ output = manager.get_all_networks(fake_context)
+        self.assertEqual(2, len(output))
+ self.assertEqual(output[0]['uuid'],
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
+ self.assertEqual(output[1]['uuid'],
+ 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ @mock.patch('nova.db.network_disassociate')
+ def test_disassociate_network(self, disassociate, get):
+ manager = fake_network.FakeNetworkManager()
+ disassociate.return_value = True
+ fake_context = context.RequestContext('user', 'project')
+ get.return_value = dict(test_network.fake_network,
+ **networks[0])
+ uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ manager.disassociate_network(fake_context, uuid)
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_disassociate_network_not_found(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
+ uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.assertRaises(exception.NetworkNotFound,
+ manager.disassociate_network, fake_context, uuid)
+
+ def _test_init_host_dynamic_fixed_range(self, net_manager):
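+        # init_host should derive the SNAT/POSTROUTING rules from the
+        # networks found in the database rather than from a static
+        # fixed_range flag, and adding a network afterwards should
+        # extend the rules accordingly.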
+ self.flags(fake_network=True,
+ routing_source_ip='172.16.0.1',
+ metadata_host='172.16.0.1',
+ public_interface='eth1',
+ dmz_cidr=['10.0.3.0/24'])
+ binary_name = linux_net.get_binary_name()
+
+ # Stub out calls we don't want to really run, mock the db
+ self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
+ self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
+ lambda *args: None)
+ self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
+ lambda *args: None)
+ self.mox.StubOutWithMock(db, 'network_get_all_by_host')
+ fake_networks = [dict(test_network.fake_network, **n)
+ for n in networks]
+ db.network_get_all_by_host(mox.IgnoreArg(),
+ mox.IgnoreArg()
+ ).MultipleTimes().AndReturn(fake_networks)
+ self.mox.ReplayAll()
+
+ net_manager.init_host()
+
+ # Get the iptables rules that got created
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, networks[0]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[0]['cidr'],
+ networks[0]['cidr']),
+ '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, networks[1]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[1]['cidr'],
+ networks[1]['cidr'])]
+
+ # Compare the expected rules against the actual ones
+ for line in expected_lines:
+ self.assertIn(line, new_lines)
+
+ # Add an additional network and ensure the rules get configured
+ new_network = {'id': 2,
+ 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
+ 'label': 'test2',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.2.0/24',
+ 'cidr_v6': '2001:dba::/64',
+ 'gateway_v6': '2001:dba::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.2.1',
+ 'dhcp_server': '192.168.2.1',
+ 'broadcast': '192.168.2.255',
+ 'dns1': '192.168.2.1',
+ 'dns2': '192.168.2.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.2.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'}
+ new_network_obj = objects.Network._from_db_object(
+ self.context, objects.Network(),
+ dict(test_network.fake_network, **new_network))
+
+ ctxt = context.get_admin_context()
+ net_manager._setup_network_on_host(ctxt, new_network_obj)
+
+ # Get the new iptables rules that got created from adding a new network
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ # Add the new expected rules to the old ones
+ expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, new_network['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
+ '! --ctstate DNAT -j ACCEPT' % (binary_name,
+ new_network['cidr'],
+ new_network['cidr'])]
+
+ # Compare the expected rules (with new network) against the actual ones
+ for line in expected_lines:
+ self.assertIn(line, new_lines)
+
+ def test_flatdhcpmanager_dynamic_fixed_range(self):
+ """Test FlatDHCPManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.FlatDHCPManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
+ def test_vlanmanager_dynamic_fixed_range(self):
+ """Test VlanManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
+ @mock.patch('nova.objects.quotas.Quotas.rollback')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
+ @mock.patch('nova.network.manager.NetworkManager.'
+ '_do_trigger_security_group_members_refresh_for_instance')
+ def test_fixed_ip_cleanup_rollback(self, fake_trig,
+ fixed_get, rollback):
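+        # If the security group members refresh blows up, the quota
+        # reservation is expected to be rolled back.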
+ manager = network_manager.NetworkManager()
+
+ fake_trig.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ manager.deallocate_fixed_ip,
+ self.context, 'fake', 'fake',
+ instance=fake_inst(uuid='ignoreduuid'))
+ rollback.assert_called_once_with(self.context)
+
+ def test_fixed_cidr_out_of_range(self):
+ manager = network_manager.NetworkManager()
+ ctxt = context.get_admin_context()
+ self.assertRaises(exception.AddressOutOfRange,
+ manager.create_networks, ctxt, label="fake",
+ cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
+
+
+class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
+ network_manager.NetworkManager):
+ """Dummy manager that implements RPCAllocateFixedIP."""
+
+
+class RPCAllocateTestCase(test.TestCase):
+ """Tests nova.network.manager.RPCAllocateFixedIP."""
+ def setUp(self):
+ super(RPCAllocateTestCase, self).setUp()
+ self.flags(use_local=True, group='conductor')
+ self.rpc_fixed = TestRPCFixedManager()
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_rpc_allocate(self):
+ """Test to verify bug 855030 doesn't resurface.
+
+        Makes sure _rpc_allocate_fixed_ip returns a value so the call
+ returns properly and the greenpool completes.
+ """
+ address = '10.10.10.10'
+
+ def fake_allocate(*args, **kwargs):
+ return address
+
+ def fake_network_get(*args, **kwargs):
+ return test_network.fake_network
+
+ self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
+ self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
+ rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
+ 'fake_instance',
+ 'fake_network')
+ self.assertEqual(rval, address)
+
+
+class TestFloatingIPManager(floating_ips.FloatingIP,
+ network_manager.NetworkManager):
+ """Dummy manager that implements FloatingIP."""
+
+
+class AllocateTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(AllocateTestCase, self).setUp()
+ dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
+ self.flags(instance_dns_manager=dns)
+ self.useFixture(test.SampleNetworks())
+ self.conductor = self.start_service(
+ 'conductor', manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.network = self.start_service('network')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.user_context = context.RequestContext('testuser',
+ 'testproject')
+
+ def test_allocate_for_instance(self):
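+        # With auto_assign_floating_ip enabled, allocate_for_instance
+        # should return network info containing a valid fixed IPv4
+        # address for the instance.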
+ address = "10.10.10.10"
+ self.flags(auto_assign_floating_ip=True)
+
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ inst = objects.Instance()
+ inst.host = self.compute.host
+ inst.display_name = HOST
+ inst.instance_type_id = 1
+ inst.uuid = FAKEUUID
+ inst.create(self.context)
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.user_context.project_id
+ nw_info = self.network.allocate_for_instance(self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=None)
+ self.assertEqual(1, len(nw_info))
+ fixed_ip = nw_info.fixed_ips()[0]['address']
+ self.assertTrue(utils.is_valid_ipv4(fixed_ip))
+ self.network.deallocate_for_instance(self.context,
+ instance=inst)
+
+ def test_allocate_for_instance_illegal_network(self):
+ networks = db.network_get_all(self.context)
+ requested_networks = []
+ for network in networks:
+ # set all networks to other projects
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host,
+ 'project_id': 'otherid'})
+ requested_networks.append((network['uuid'], None))
+ # set the first network to our project
+ db.network_update(self.context, networks[0]['id'],
+ {'project_id': self.user_context.project_id})
+
+ inst = objects.Instance()
+ inst.host = self.compute.host
+ inst.display_name = HOST
+ inst.instance_type_id = 1
+ inst.uuid = FAKEUUID
+ inst.create(self.context)
+ self.assertRaises(exception.NetworkNotFoundForProject,
+ self.network.allocate_for_instance, self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=self.context.project_id, macs=None,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_mac(self):
+ available_macs = set(['ca:fe:de:ad:be:ef'])
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+ assigned_macs = [vif['address'] for vif in nw_info]
+ self.assertEqual(1, len(assigned_macs))
+ self.assertEqual(available_macs.pop(), assigned_macs[0])
+ self.network.deallocate_for_instance(self.context,
+ instance_id=inst['id'],
+ host=self.network.host,
+ project_id=project_id)
+
+ def test_allocate_for_instance_not_enough_macs(self):
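+        # An empty pool of available MACs should make allocation fail
+        # with VirtualInterfaceCreateException.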
+ available_macs = set()
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self.network.allocate_for_instance,
+ self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+
+
+class FloatingIPTestCase(test.TestCase):
+ """Tests nova.network.manager.FloatingIP."""
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(FloatingIPTestCase, self).setUp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.flags(log_dir=self.tempdir)
+ self.flags(use_local=True, group='conductor')
+ self.network = TestFloatingIPManager()
+ self.network.db = db
+ self.project_id = 'testproject'
+ self.context = context.RequestContext('testuser', self.project_id,
+ is_admin=False)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.service_get_by_host_and_topic')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
+ service_get,
+ inst_get, net_get,
+ fixed_get):
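+        # On a multi_host network the disassociate is proxied over RPC to
+        # the host actually running the instance ('some-other-host' here).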
+ floating_ip = dict(test_floating_ip.fake_floating_ip,
+ fixed_ip_id=12)
+
+ fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=None,
+ instance_uuid='instance-uuid')
+
+ network = dict(test_network.fake_network,
+ multi_host=True)
+
+ instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ floating_get.return_value = floating_ip
+ fixed_get.return_value = fixed_ip
+ net_get.return_value = network
+ inst_get.return_value = instance
+ service_get.return_value = test_service.fake_service
+
+ self.stubs.Set(self.network.servicegroup_api,
+ 'service_is_up',
+ lambda _x: True)
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_disassociate_floating_ip')
+
+ self.network.network_rpcapi._disassociate_floating_ip(
+ ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_associate_floating_ip_multi_host_calls(self, floating_get,
+ inst_get, net_get,
+ fixed_get):
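+        # Likewise, association on a multi_host network is forwarded over
+        # RPC to the instance's host.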
+ floating_ip = dict(test_floating_ip.fake_floating_ip,
+ fixed_ip_id=None)
+
+ fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=None,
+ instance_uuid='instance-uuid')
+
+ network = dict(test_network.fake_network,
+ multi_host=True)
+
+ instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ floating_get.return_value = floating_ip
+ fixed_get.return_value = fixed_ip
+ net_get.return_value = network
+ inst_get.return_value = instance
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_associate_floating_ip')
+
+ self.network.network_rpcapi._associate_floating_ip(
+ ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
+ 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
+
+ def test_double_deallocation(self):
+ instance_ref = db.instance_create(self.context,
+ {"project_id": self.project_id})
+        # Run it twice so it faults if it does not handle instances
+        # without fixed networks. If either call fails, deallocation
+        # does not cope with an instance that has no addresses.
+ self.network.deallocate_for_instance(self.context,
+ instance_id=instance_ref['id'])
+ self.network.deallocate_for_instance(self.context,
+ instance_id=instance_ref['id'])
+
+ def test_deallocate_floating_ip_quota_rollback(self):
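+        # When floating_ip_deallocate reports nothing was freed, the quota
+        # reservation is expected to be rolled back rather than committed.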
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1', fixed_ip_id=None,
+ project_id=ctxt.project_id)
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
+ self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
+ self.mox.StubOutWithMock(self.network,
+ '_floating_ip_owned_by_project')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+ quota.QUOTAS.reserve(self.context,
+ floating_ips=-1,
+ project_id='testproject').AndReturn('fake-rsv')
+ self.network._floating_ip_owned_by_project(self.context,
+ mox.IgnoreArg())
+ db.floating_ip_deallocate(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(None)
+ quota.QUOTAS.rollback(self.context, 'fake-rsv',
+ project_id='testproject')
+
+ self.mox.ReplayAll()
+ self.network.deallocate_floating_ip(self.context, '10.0.0.1')
+
+ def test_deallocation_deleted_instance(self):
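+        # Deallocating for an already-deleted instance that still has
+        # fixed and floating IPs on record should not raise.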
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ lambda *args, **kwargs: None)
+ instance = objects.Instance()
+ instance.project_id = self.project_id
+ instance.deleted = True
+ instance.create(self.context)
+ network = db.network_create_safe(self.context.elevated(), {
+ 'project_id': self.project_id,
+ 'host': CONF.host,
+ 'label': 'foo'})
+ fixed = db.fixed_ip_create(self.context, {'allocated': True,
+ 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
+ 'network_id': network['id']})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
+ 'fixed_ip_id': fixed['id'],
+ 'project_id': self.project_id})
+ self.network.deallocate_for_instance(self.context, instance=instance)
+
+ def test_deallocation_duplicate_floating_ip(self):
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ lambda *args, **kwargs: None)
+ instance = objects.Instance()
+ instance.project_id = self.project_id
+ instance.create(self.context)
+ network = db.network_create_safe(self.context.elevated(), {
+ 'project_id': self.project_id,
+ 'host': CONF.host,
+ 'label': 'foo'})
+ fixed = db.fixed_ip_create(self.context, {'allocated': True,
+ 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
+ 'network_id': network['id']})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10',
+ 'deleted': True})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
+ 'fixed_ip_id': fixed['id'],
+ 'project_id': self.project_id})
+ self.network.deallocate_for_instance(self.context, instance=instance)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ @mock.patch('nova.db.floating_ip_update')
+ def test_migrate_instance_start(self, floating_update, floating_get,
+ fixed_get):
+ called = {'count': 0}
+
+ def fake_floating_ip_get_by_address(context, address):
+ return dict(test_floating_ip.fake_floating_ip,
+ address=address,
+ fixed_ip_id=0)
+
+ def fake_is_stale_floating_ip_address(context, floating_ip):
+ return str(floating_ip.address) == '172.24.4.23'
+
+ floating_get.side_effect = fake_floating_ip_get_by_address
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ instance_uuid='fake_uuid',
+ address='10.0.0.2',
+ network=test_network.fake_network)
+ floating_update.return_value = fake_floating_ip_get_by_address(
+ None, '1.2.3.4')
+
+ def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
+ network):
+ called['count'] += 1
+
+ def fake_clean_conntrack(fixed_ip):
+            if str(fixed_ip) != "10.0.0.2":
+ raise exception.FixedIpInvalid(address=fixed_ip)
+
+ self.stubs.Set(self.network, '_is_stale_floating_ip_address',
+ fake_is_stale_floating_ip_address)
+ self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
+ fake_remove_floating_ip)
+ self.stubs.Set(self.network.driver, 'clean_conntrack',
+ fake_clean_conntrack)
+ self.mox.ReplayAll()
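+        # '172.24.4.23' is reported as stale above, so only the other two
+        # addresses should have their floating IPs removed from the source.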
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_start(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source',
+ dest='fake_dest')
+
+ self.assertEqual(called['count'], 2)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.floating_ip_update')
+ def test_migrate_instance_finish(self, floating_update, fixed_get):
+ called = {'count': 0}
+
+ def fake_floating_ip_get_by_address(context, address):
+ return dict(test_floating_ip.fake_floating_ip,
+ address=address,
+ fixed_ip_id=0)
+
+ def fake_is_stale_floating_ip_address(context, floating_ip):
+ return str(floating_ip.address) == '172.24.4.23'
+
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ instance_uuid='fake_uuid',
+ address='10.0.0.2',
+ network=test_network.fake_network)
+ floating_update.return_value = fake_floating_ip_get_by_address(
+ None, '1.2.3.4')
+
+ def fake_add_floating_ip(floating_addr, fixed_addr, interface,
+ network):
+ called['count'] += 1
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+ fake_floating_ip_get_by_address)
+ self.stubs.Set(self.network, '_is_stale_floating_ip_address',
+ fake_is_stale_floating_ip_address)
+ self.stubs.Set(self.network.l3driver, 'add_floating_ip',
+ fake_add_floating_ip)
+ self.mox.ReplayAll()
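+        # The stale address is skipped, so add_floating_ip should only be
+        # called for the two remaining addresses on the destination host.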
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_finish(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ host='fake_dest',
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source')
+
+ self.assertEqual(called['count'], 2)
+
+ def test_floating_dns_create_conflict(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+
+ self.assertRaises(exception.FloatingIpDNSExists,
+ self.network.add_dns_entry, self.context,
+ address1, name1, "A", zone)
+
+ def test_floating_create_and_get(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertFalse(entries)
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+ self.network.add_dns_entry(self.context, address1, name2, "A", zone)
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertEqual(len(entries), 2)
+ self.assertEqual(entries[0], name1)
+ self.assertEqual(entries[1], name2)
+
+ entries = self.network.get_dns_entries_by_name(self.context,
+ name1, zone)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ def test_floating_dns_delete(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+ self.network.add_dns_entry(self.context, address1, name2, "A", zone)
+ self.network.delete_dns_entry(self.context, name1, zone)
+
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], name2)
+
+ self.assertRaises(exception.NotFound,
+ self.network.delete_dns_entry, self.context,
+ name1, zone)
+
+ def test_floating_dns_domains_public(self):
+ zone1 = "testzone"
+ domain1 = "example.org"
+ domain2 = "example.com"
+ address1 = '10.10.10.10'
+ entryname = 'testentry'
+
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.create_public_dns_domain, self.context,
+ domain1, zone1)
+ self.network.create_public_dns_domain(context_admin, domain1,
+ 'testproject')
+ self.network.create_public_dns_domain(context_admin, domain2,
+ 'fakeproject')
+
+ domains = self.network.get_dns_domains(self.context)
+ self.assertEqual(len(domains), 2)
+ self.assertEqual(domains[0]['domain'], domain1)
+ self.assertEqual(domains[1]['domain'], domain2)
+ self.assertEqual(domains[0]['project'], 'testproject')
+ self.assertEqual(domains[1]['project'], 'fakeproject')
+
+ self.network.add_dns_entry(self.context, address1, entryname,
+ 'A', domain1)
+ entries = self.network.get_dns_entries_by_name(self.context,
+ entryname, domain1)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.delete_dns_domain, self.context,
+ domain1)
+ self.network.delete_dns_domain(context_admin, domain1)
+ self.network.delete_dns_domain(context_admin, domain2)
+
+ # Verify that deleting the domain deleted the associated entry
+ entries = self.network.get_dns_entries_by_name(self.context,
+ entryname, domain1)
+ self.assertFalse(entries)
+
+ def test_delete_all_by_ip(self):
+ domain1 = "example.org"
+ domain2 = "example.com"
+ address = "10.10.10.10"
+ name1 = "foo"
+ name2 = "bar"
+
+ def fake_domains(context):
+ return [{'domain': 'example.org', 'scope': 'public'},
+ {'domain': 'example.com', 'scope': 'public'},
+ {'domain': 'test.example.org', 'scope': 'public'}]
+
+ self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
+
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ self.network.create_public_dns_domain(context_admin, domain1,
+ 'testproject')
+ self.network.create_public_dns_domain(context_admin, domain2,
+ 'fakeproject')
+
+ domains = self.network.get_dns_domains(self.context)
+ for domain in domains:
+ self.network.add_dns_entry(self.context, address,
+ name1, "A", domain['domain'])
+ self.network.add_dns_entry(self.context, address,
+ name2, "A", domain['domain'])
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address,
+ domain['domain'])
+ self.assertEqual(len(entries), 2)
+
+ self.network._delete_all_entries_for_ip(self.context, address)
+
+ for domain in domains:
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address,
+ domain['domain'])
+ self.assertFalse(entries)
+
+ self.network.delete_dns_domain(context_admin, domain1)
+ self.network.delete_dns_domain(context_admin, domain2)
+
+ def test_mac_conflicts(self):
+ # Make sure MAC collisions are retried.
+ self.flags(create_unique_mac_address_attempts=3)
+ ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
+ macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
+
+ # Create a VIF with aa:aa:aa:aa:aa:aa
+ crash_test_dummy_vif = {
+ 'address': macs[1],
+ 'instance_uuid': 'fake_uuid',
+ 'network_id': 123,
+ 'uuid': 'fake_uuid',
+ }
+ self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
+
+ # Hand out a collision first, then a legit MAC
+ def fake_gen_mac():
+ return macs.pop()
+ self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
+
+ # SQLite doesn't seem to honor the uniqueness constraint on the
+ # address column, so fake the collision-avoidance here
+ def fake_vif_save(vif):
+ if vif.address == crash_test_dummy_vif['address']:
+ raise db_exc.DBError("If you're smart, you'll retry!")
+ # NOTE(russellb) The VirtualInterface object requires an ID to be
+ # set, and we expect it to get set automatically when we do the
+ # save.
+ vif.id = 1
+ self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
+
+ # Attempt to add another and make sure that both MACs are consumed
+ # by the retry loop
+ self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
+ self.assertEqual(macs, [])
+
+ def test_deallocate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ self.network.deallocate_floating_ip,
+ self.context, '1.2.3.4')
+
+ def test_associate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ self.network.associate_floating_ip,
+ self.context, '1.2.3.4', '10.0.0.1')
+
+ def test_disassociate_client_exceptions(self):
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+ self.network.db.floating_ip_get_by_address(
+ self.context, '1.2.3.4').AndRaise(
+ exception.FloatingIpNotFoundForAddress(address='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ self.network.disassociate_floating_ip,
+ self.context, '1.2.3.4')
+
+ def test_get_floating_ip_client_exceptions(self):
+        # Ensure that FloatingIpNotFound is wrapped.
+ self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
+ self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
+ exception.FloatingIpNotFound(id='fake'))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ self.network.get_floating_ip,
+ self.context, 'fake-id')
+
+ def _test_associate_floating_ip_failure(self, stdout, expected_exception):
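+        # The stdout of the failing l3 driver call determines whether the
+        # error is re-raised as-is or translated to NoFloatingIpInterface.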
+ def _fake_catchall(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ network=test_network.fake_network)
+
+ def _fake_add_floating_ip(*args, **kwargs):
+ raise processutils.ProcessExecutionError(stdout)
+
+ self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
+ _fake_catchall)
+ self.stubs.Set(self.network.db, 'floating_ip_disassociate',
+ _fake_catchall)
+ self.stubs.Set(self.network.l3driver, 'add_floating_ip',
+ _fake_add_floating_ip)
+
+ self.assertRaises(expected_exception,
+ self.network._associate_floating_ip, self.context,
+ '1.2.3.4', '1.2.3.5', '', '')
+
+ def test_associate_floating_ip_failure(self):
+ self._test_associate_floating_ip_failure(None,
+ processutils.ProcessExecutionError)
+
+ def test_associate_floating_ip_failure_interface_not_found(self):
+ self._test_associate_floating_ip_failure('Cannot find device',
+ exception.NoFloatingIpInterface)
+
+ @mock.patch('nova.objects.FloatingIP.get_by_address')
+ def test_get_floating_ip_by_address(self, mock_get):
+ mock_get.return_value = mock.sentinel.floating
+ self.assertEqual(mock.sentinel.floating,
+ self.network.get_floating_ip_by_address(
+ self.context,
+ mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_project')
+ def test_get_floating_ips_by_project(self, mock_get):
+ mock_get.return_value = mock.sentinel.floatings
+ self.assertEqual(mock.sentinel.floatings,
+ self.network.get_floating_ips_by_project(
+ self.context))
+ mock_get.assert_called_once_with(self.context, self.context.project_id)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+ def test_get_floating_ips_by_fixed_address(self, mock_get):
+ mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
+ objects.FloatingIP(address='5.6.7.8')]
+ self.assertEqual(['1.2.3.4', '5.6.7.8'],
+ self.network.get_floating_ips_by_fixed_address(
+ self.context, mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
+
+class InstanceDNSTestCase(test.TestCase):
+ """Tests nova.network.manager instance DNS."""
+ def setUp(self):
+ super(InstanceDNSTestCase, self).setUp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.flags(log_dir=self.tempdir)
+ self.flags(use_local=True, group='conductor')
+ self.network = TestFloatingIPManager()
+ self.network.db = db
+ self.project_id = 'testproject'
+ self.context = context.RequestContext('testuser', self.project_id,
+ is_admin=False)
+
+ def test_dns_domains_private(self):
+ zone1 = 'testzone'
+ domain1 = 'example.org'
+
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.create_private_dns_domain, self.context,
+ domain1, zone1)
+
+ self.network.create_private_dns_domain(context_admin, domain1, zone1)
+ domains = self.network.get_dns_domains(self.context)
+ self.assertEqual(len(domains), 1)
+ self.assertEqual(domains[0]['domain'], domain1)
+ self.assertEqual(domains[0]['availability_zone'], zone1)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.delete_dns_domain, self.context,
+ domain1)
+ self.network.delete_dns_domain(context_admin, domain1)
+
+
+domain1 = "example.org"
+domain2 = "example.com"
+
+
+class LdapDNSTestCase(test.TestCase):
+ """Tests nova.network.ldapdns.LdapDNS."""
+ def setUp(self):
+ super(LdapDNSTestCase, self).setUp()
+
+ self.useFixture(test.ReplaceModule('ldap', fake_ldap))
+ dns_class = 'nova.network.ldapdns.LdapDNS'
+ self.driver = importutils.import_object(dns_class)
+
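+        # Seed the fake LDAP tree with the base entry the driver expects
+        # before creating the test domains.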
+ attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
+ 'domain', 'dcobject', 'top'],
+ 'associateddomain': ['root'],
+ 'dc': ['root']}
+ self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
+ self.driver.create_domain(domain1)
+ self.driver.create_domain(domain2)
+
+ def tearDown(self):
+ self.driver.delete_domain(domain1)
+ self.driver.delete_domain(domain2)
+ super(LdapDNSTestCase, self).tearDown()
+
+ def test_ldap_dns_domains(self):
+ domains = self.driver.get_domains()
+ self.assertEqual(len(domains), 2)
+ self.assertIn(domain1, domains)
+ self.assertIn(domain2, domains)
+
+ def test_ldap_dns_create_conflict(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+
+ self.assertRaises(exception.FloatingIpDNSExists,
+ self.driver.create_entry,
+ name1, address1, "A", domain1)
+
+ def test_ldap_dns_create_and_get(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertFalse(entries)
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+ self.driver.create_entry(name2, address1, "A", domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertEqual(len(entries), 2)
+ self.assertEqual(entries[0], name1)
+ self.assertEqual(entries[1], name2)
+
+ entries = self.driver.get_entries_by_name(name1, domain1)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ def test_ldap_dns_delete(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+ self.driver.create_entry(name2, address1, "A", domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertEqual(len(entries), 2)
+
+ self.driver.delete_entry(name1, domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+        LOG.debug("entries: %s", entries)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], name2)
+
+ self.assertRaises(exception.NotFound,
+ self.driver.delete_entry,
+ name1, domain1)
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
new file mode 100644
index 0000000000..456d4c3a18
--- /dev/null
+++ b/nova/tests/unit/network/test_network_info.py
@@ -0,0 +1,800 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova.network import model
+from nova import test
+from nova.tests.unit import fake_network_cache_model
+from nova.virt import netutils
+
+
+class RouteTests(test.NoDBTestCase):
+ def test_create_route_with_attrs(self):
+ route = fake_network_cache_model.new_route()
+ fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
+ self.assertEqual(route['cidr'], '0.0.0.0/24')
+ self.assertEqual(route['gateway']['address'], '192.168.1.1')
+ self.assertEqual(route['interface'], 'eth0')
+
+ def test_routes_equal(self):
+ route1 = model.Route()
+ route2 = model.Route()
+ self.assertEqual(route1, route2)
+
+ def test_routes_not_equal(self):
+ route1 = model.Route(cidr='1.1.1.0/24')
+ route2 = model.Route(cidr='2.2.2.0/24')
+ self.assertNotEqual(route1, route2)
+
+ route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1')
+ route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2')
+ self.assertNotEqual(route1, route2)
+
+ route1 = model.Route(cidr='1.1.1.1/24', interface='tap0')
+ route2 = model.Route(cidr='1.1.1.1/24', interface='tap1')
+ self.assertNotEqual(route1, route2)
+
+ def test_hydrate(self):
+ route = model.Route.hydrate(
+ {'gateway': fake_network_cache_model.new_ip(
+ dict(address='192.168.1.1'))})
+ self.assertIsNone(route['cidr'])
+ self.assertEqual(route['gateway']['address'], '192.168.1.1')
+ self.assertIsNone(route['interface'])
+
+
+class IPTests(test.NoDBTestCase):
+ def test_ip_equal(self):
+ ip1 = model.IP(address='127.0.0.1')
+ ip2 = model.IP(address='127.0.0.1')
+ self.assertEqual(ip1, ip2)
+
+ def test_ip_not_equal(self):
+ ip1 = model.IP(address='127.0.0.1')
+ ip2 = model.IP(address='172.0.0.3')
+ self.assertNotEqual(ip1, ip2)
+
+ ip1 = model.IP(address='127.0.0.1', type=1)
+ ip2 = model.IP(address='172.0.0.1', type=2)
+ self.assertNotEqual(ip1, ip2)
+
+ ip1 = model.IP(address='127.0.0.1', version=4)
+ ip2 = model.IP(address='172.0.0.1', version=6)
+ self.assertNotEqual(ip1, ip2)
+
+
+class FixedIPTests(test.NoDBTestCase):
+ def test_createnew_fixed_ip_with_attrs(self):
+ fixed_ip = model.FixedIP(address='192.168.1.100')
+ self.assertEqual(fixed_ip['address'], '192.168.1.100')
+ self.assertEqual(fixed_ip['floating_ips'], [])
+ self.assertEqual(fixed_ip['type'], 'fixed')
+ self.assertEqual(fixed_ip['version'], 4)
+
+ def test_create_fixed_ipv6(self):
+ fixed_ip = model.FixedIP(address='::1')
+ self.assertEqual(fixed_ip['address'], '::1')
+ self.assertEqual(fixed_ip['floating_ips'], [])
+ self.assertEqual(fixed_ip['type'], 'fixed')
+ self.assertEqual(fixed_ip['version'], 6)
+
+ def test_create_fixed_bad_ip_fails(self):
+ self.assertRaises(exception.InvalidIpAddressError,
+ model.FixedIP,
+ address='picklespicklespickles')
+
+ def test_equate_two_fixed_ips(self):
+ fixed_ip = model.FixedIP(address='::1')
+ fixed_ip2 = model.FixedIP(address='::1')
+ self.assertEqual(fixed_ip, fixed_ip2)
+
+ def test_equate_two_dissimilar_fixed_ips_fails(self):
+ fixed_ip = model.FixedIP(address='::1')
+ fixed_ip2 = model.FixedIP(address='::2')
+ self.assertNotEqual(fixed_ip, fixed_ip2)
+
+ fixed_ip = model.FixedIP(address='::1', type='1')
+ fixed_ip2 = model.FixedIP(address='::1', type='2')
+ self.assertNotEqual(fixed_ip, fixed_ip2)
+
+ fixed_ip = model.FixedIP(address='::1', version='6')
+ fixed_ip2 = model.FixedIP(address='::1', version='4')
+ self.assertNotEqual(fixed_ip, fixed_ip2)
+
+ fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1')
+ fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8')
+ self.assertNotEqual(fixed_ip, fixed_ip2)
+
+ def test_hydrate(self):
+ fixed_ip = model.FixedIP.hydrate({})
+ self.assertEqual(fixed_ip['floating_ips'], [])
+ self.assertIsNone(fixed_ip['address'])
+ self.assertEqual(fixed_ip['type'], 'fixed')
+ self.assertIsNone(fixed_ip['version'])
+
+ def test_add_floating_ip(self):
+ fixed_ip = model.FixedIP(address='192.168.1.100')
+ fixed_ip.add_floating_ip('192.168.1.101')
+ self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
+
+ def test_add_floating_ip_repeatedly_only_one_instance(self):
+ fixed_ip = model.FixedIP(address='192.168.1.100')
+ for i in xrange(10):
+ fixed_ip.add_floating_ip('192.168.1.101')
+ self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
+
+
+class SubnetTests(test.NoDBTestCase):
+ def test_create_subnet_with_attrs(self):
+ subnet = fake_network_cache_model.new_subnet()
+
+ route1 = fake_network_cache_model.new_route()
+
+ self.assertEqual(subnet['cidr'], '10.10.0.0/24')
+ self.assertEqual(subnet['dns'],
+ [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+ fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
+ self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
+ self.assertEqual(subnet['ips'],
+ [fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.2')),
+ fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.3'))])
+ self.assertEqual(subnet['routes'], [route1])
+ self.assertEqual(subnet['version'], 4)
+
+ def test_subnet_equal(self):
+ subnet1 = fake_network_cache_model.new_subnet()
+ subnet2 = fake_network_cache_model.new_subnet()
+ self.assertEqual(subnet1, subnet2)
+
+ def test_subnet_not_equal(self):
+ subnet1 = model.Subnet(cidr='1.1.1.0/24')
+ subnet2 = model.Subnet(cidr='2.2.2.0/24')
+ self.assertNotEqual(subnet1, subnet2)
+
+ subnet1 = model.Subnet(dns='1.1.1.0/24')
+ subnet2 = model.Subnet(dns='2.2.2.0/24')
+ self.assertNotEqual(subnet1, subnet2)
+
+ subnet1 = model.Subnet(gateway='1.1.1.1/24')
+ subnet2 = model.Subnet(gateway='2.2.2.1/24')
+ self.assertNotEqual(subnet1, subnet2)
+
+ subnet1 = model.Subnet(ips='1.1.1.0/24')
+ subnet2 = model.Subnet(ips='2.2.2.0/24')
+ self.assertNotEqual(subnet1, subnet2)
+
+ subnet1 = model.Subnet(routes='1.1.1.0/24')
+ subnet2 = model.Subnet(routes='2.2.2.0/24')
+ self.assertNotEqual(subnet1, subnet2)
+
+ subnet1 = model.Subnet(version='4')
+ subnet2 = model.Subnet(version='6')
+ self.assertNotEqual(subnet1, subnet2)
+
+ def test_add_route(self):
+ subnet = fake_network_cache_model.new_subnet()
+ route1 = fake_network_cache_model.new_route()
+ route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
+ subnet.add_route(route2)
+ self.assertEqual(subnet['routes'], [route1, route2])
+
+ def test_add_route_a_lot(self):
+ subnet = fake_network_cache_model.new_subnet()
+ route1 = fake_network_cache_model.new_route()
+ route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
+ for i in xrange(10):
+ subnet.add_route(route2)
+ self.assertEqual(subnet['routes'], [route1, route2])
+
+ def test_add_dns(self):
+ subnet = fake_network_cache_model.new_subnet()
+ dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
+ subnet.add_dns(dns)
+ self.assertEqual(subnet['dns'],
+ [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+ fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
+ fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
+
+ def test_add_dns_a_lot(self):
+ subnet = fake_network_cache_model.new_subnet()
+ for i in xrange(10):
+ subnet.add_dns(fake_network_cache_model.new_ip(
+ dict(address='9.9.9.9')))
+ self.assertEqual(subnet['dns'],
+ [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+ fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
+ fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
+
+ def test_add_ip(self):
+ subnet = fake_network_cache_model.new_subnet()
+ subnet.add_ip(fake_network_cache_model.new_ip(
+ dict(address='192.168.1.102')))
+ self.assertEqual(subnet['ips'],
+ [fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.2')),
+ fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.3')),
+ fake_network_cache_model.new_ip(
+ dict(address='192.168.1.102'))])
+
+ def test_add_ip_a_lot(self):
+ subnet = fake_network_cache_model.new_subnet()
+ for i in xrange(10):
+ subnet.add_ip(fake_network_cache_model.new_fixed_ip(
+ dict(address='192.168.1.102')))
+ self.assertEqual(subnet['ips'],
+ [fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.2')),
+ fake_network_cache_model.new_fixed_ip(
+ dict(address='10.10.0.3')),
+ fake_network_cache_model.new_fixed_ip(
+ dict(address='192.168.1.102'))])
+
+ def test_hydrate(self):
+ subnet_dict = {
+ 'cidr': '255.255.255.0',
+ 'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
+ 'ips': [fake_network_cache_model.new_fixed_ip(
+ dict(address='2.2.2.2'))],
+ 'routes': [fake_network_cache_model.new_route()],
+ 'version': 4,
+ 'gateway': fake_network_cache_model.new_ip(
+ dict(address='3.3.3.3'))}
+ subnet = model.Subnet.hydrate(subnet_dict)
+
+ self.assertEqual(subnet['cidr'], '255.255.255.0')
+ self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
+ dict(address='1.1.1.1'))])
+ self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
+ self.assertEqual(subnet['ips'], [fake_network_cache_model.new_fixed_ip(
+ dict(address='2.2.2.2'))])
+ self.assertEqual(subnet['routes'], [
+ fake_network_cache_model.new_route()])
+ self.assertEqual(subnet['version'], 4)
+
+
+class NetworkTests(test.NoDBTestCase):
+ def test_create_network(self):
+ network = fake_network_cache_model.new_network()
+ self.assertEqual(network['id'], 1)
+ self.assertEqual(network['bridge'], 'br0')
+ self.assertEqual(network['label'], 'public')
+ self.assertEqual(network['subnets'],
+ [fake_network_cache_model.new_subnet(),
+ fake_network_cache_model.new_subnet(
+ dict(cidr='255.255.255.255'))])
+
+ def test_add_subnet(self):
+ network = fake_network_cache_model.new_network()
+ network.add_subnet(fake_network_cache_model.new_subnet(
+ dict(cidr='0.0.0.0')))
+ self.assertEqual(network['subnets'],
+ [fake_network_cache_model.new_subnet(),
+ fake_network_cache_model.new_subnet(
+ dict(cidr='255.255.255.255')),
+ fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
+
+ def test_add_subnet_a_lot(self):
+ network = fake_network_cache_model.new_network()
+ for i in xrange(10):
+ network.add_subnet(fake_network_cache_model.new_subnet(
+ dict(cidr='0.0.0.0')))
+ self.assertEqual(network['subnets'],
+ [fake_network_cache_model.new_subnet(),
+ fake_network_cache_model.new_subnet(
+ dict(cidr='255.255.255.255')),
+ fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
+
+ def test_network_equal(self):
+ network1 = model.Network()
+ network2 = model.Network()
+ self.assertEqual(network1, network2)
+
+ def test_network_not_equal(self):
+ network1 = model.Network(id='1')
+ network2 = model.Network(id='2')
+ self.assertNotEqual(network1, network2)
+
+ network1 = model.Network(bridge='br-int')
+ network2 = model.Network(bridge='br0')
+ self.assertNotEqual(network1, network2)
+
+ network1 = model.Network(label='net1')
+ network2 = model.Network(label='net2')
+ self.assertNotEqual(network1, network2)
+
+ network1 = model.Network(subnets='1.1.1.0/24')
+ network2 = model.Network(subnets='2.2.2.0/24')
+ self.assertNotEqual(network1, network2)
+
+ def test_hydrate(self):
+ fake_network_cache_model.new_subnet()
+ fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
+ network = model.Network.hydrate(fake_network_cache_model.new_network())
+
+ self.assertEqual(network['id'], 1)
+ self.assertEqual(network['bridge'], 'br0')
+ self.assertEqual(network['label'], 'public')
+ self.assertEqual(network['subnets'],
+ [fake_network_cache_model.new_subnet(),
+ fake_network_cache_model.new_subnet(
+ dict(cidr='255.255.255.255'))])
+
+
+class VIFTests(test.NoDBTestCase):
+ def test_create_vif(self):
+ vif = fake_network_cache_model.new_vif()
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
+ def test_vif_equal(self):
+ vif1 = model.VIF()
+ vif2 = model.VIF()
+ self.assertEqual(vif1, vif2)
+
+ def test_vif_not_equal(self):
+ vif1 = model.VIF(id=1)
+ vif2 = model.VIF(id=2)
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(address='00:00:00:00:00:11')
+ vif2 = model.VIF(address='00:00:00:00:00:22')
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(network='net1')
+ vif2 = model.VIF(network='net2')
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(type='ovs')
+ vif2 = model.VIF(type='linuxbridge')
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(devname='ovs1234')
+ vif2 = model.VIF(devname='linuxbridge1234')
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(qbh_params=1)
+ vif2 = model.VIF(qbh_params=None)
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(qbg_params=1)
+ vif2 = model.VIF(qbg_params=None)
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(active=True)
+ vif2 = model.VIF(active=False)
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
+ vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT)
+ self.assertNotEqual(vif1, vif2)
+
+ vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'})
+ vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'})
+ self.assertNotEqual(vif1, vif2)
+
+ def test_create_vif_with_type(self):
+ vif_dict = dict(
+ id=1,
+ address='aa:aa:aa:aa:aa:aa',
+ network=fake_network_cache_model.new_network(),
+ type='bridge')
+ vif = fake_network_cache_model.new_vif(vif_dict)
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['type'], 'bridge')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
+ def test_vif_get_fixed_ips(self):
+ vif = fake_network_cache_model.new_vif()
+ fixed_ips = vif.fixed_ips()
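+        # The fake network has two subnets carrying the same pair of fixed
+        # IPs, so each address is expected to appear twice.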
+ ips = [
+ fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')),
+ fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3'))
+ ] * 2
+ self.assertEqual(fixed_ips, ips)
+
+ def test_vif_get_floating_ips(self):
+ vif = fake_network_cache_model.new_vif()
+ vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
+ floating_ips = vif.floating_ips()
+ self.assertEqual(floating_ips, ['192.168.1.1'])
+
+ def test_vif_get_labeled_ips(self):
+ vif = fake_network_cache_model.new_vif()
+ labeled_ips = vif.labeled_ips()
+ ip_dict = {
+ 'network_id': 1,
+ 'ips': [fake_network_cache_model.new_ip(
+ {'address': '10.10.0.2', 'type': 'fixed'}),
+ fake_network_cache_model.new_ip(
+ {'address': '10.10.0.3', 'type': 'fixed'})] * 2,
+ 'network_label': 'public'}
+ self.assertEqual(labeled_ips, ip_dict)
+
+ def test_hydrate(self):
+ fake_network_cache_model.new_network()
+ vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
+ def test_hydrate_vif_with_type(self):
+ vif_dict = dict(
+ id=1,
+ address='aa:aa:aa:aa:aa:aa',
+ network=fake_network_cache_model.new_network(),
+ type='bridge')
+ vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
+ self.assertEqual(vif['id'], 1)
+ self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+ self.assertEqual(vif['type'], 'bridge')
+ self.assertEqual(vif['network'],
+ fake_network_cache_model.new_network())
+
+
+class NetworkInfoTests(test.NoDBTestCase):
+ def test_create_model(self):
+ ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
+ fake_network_cache_model.new_vif(
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
+ self.assertEqual(ninfo.fixed_ips(),
+ [fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.2'}),
+ fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.3'})] * 4)
+
+ def test_create_async_model(self):
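+        # The async wrapper should evaluate the wrapped callable on first
+        # access and then behave like a plain NetworkInfo.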
+ def async_wrapper():
+ return model.NetworkInfo(
+ [fake_network_cache_model.new_vif(),
+ fake_network_cache_model.new_vif(
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
+
+ ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+ self.assertEqual(ninfo.fixed_ips(),
+ [fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.2'}),
+ fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.3'})] * 4)
+
+ def test_create_async_model_exceptions(self):
+ def async_wrapper():
+ raise test.TestingException()
+
+ ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+ self.assertRaises(test.TestingException, ninfo.wait)
+ # 2nd one doesn't raise
+ self.assertIsNone(ninfo.wait())
+ # Test that do_raise=False works on .wait()
+ ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+ self.assertIsNone(ninfo.wait(do_raise=False))
+        # Test that the exception is also raised when calling a method
+ ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+ self.assertRaises(test.TestingException, ninfo.fixed_ips)
+
+ def test_get_floating_ips(self):
+ vif = fake_network_cache_model.new_vif()
+ vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
+ ninfo = model.NetworkInfo([vif,
+ fake_network_cache_model.new_vif(
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
+ self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])
+
+ def test_hydrate(self):
+ ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
+ fake_network_cache_model.new_vif(
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
+ model.NetworkInfo.hydrate(ninfo)
+ self.assertEqual(ninfo.fixed_ips(),
+ [fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.2'}),
+ fake_network_cache_model.new_fixed_ip(
+ {'address': '10.10.0.3'})] * 4)
+
+ def _setup_injected_network_scenario(self, should_inject=True,
+ use_ipv4=True, use_ipv6=False,
+ gateway=True, dns=True,
+ two_interfaces=False,
+ libvirt_virt_type=None):
+ """Check that netutils properly decides whether to inject based on
+ whether the supplied subnet is static or dynamic.
+ """
+ network = fake_network_cache_model.new_network({'subnets': []})
+
+ subnet_dict = {}
+ if not gateway:
+ subnet_dict['gateway'] = None
+
+ if not dns:
+ subnet_dict['dns'] = None
+
+ if not should_inject:
+ subnet_dict['dhcp_server'] = '10.10.0.1'
+
+ if use_ipv4:
+ network.add_subnet(
+ fake_network_cache_model.new_subnet(subnet_dict))
+
+ if should_inject and use_ipv6:
+ gateway_ip = fake_network_cache_model.new_ip(dict(
+ address='1234:567::1'))
+ ip = fake_network_cache_model.new_ip(dict(
+ address='1234:567::2'))
+ ipv6_subnet_dict = dict(
+ cidr='1234:567::/48',
+ gateway=gateway_ip,
+ dns=[fake_network_cache_model.new_ip(
+ dict(address='2001:4860:4860::8888')),
+ fake_network_cache_model.new_ip(
+ dict(address='2001:4860:4860::8844'))],
+ ips=[ip])
+ if not gateway:
+ ipv6_subnet_dict['gateway'] = None
+ network.add_subnet(fake_network_cache_model.new_subnet(
+ ipv6_subnet_dict))
+
+ # Behave as though CONF.flat_injected is True
+ network['meta']['injected'] = True
+ vif = fake_network_cache_model.new_vif({'network': network})
+ vifs = [vif]
+ if two_interfaces:
+ vifs.append(vif)
+
+ nwinfo = model.NetworkInfo(vifs)
+ return netutils.get_injected_network_template(
+ nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)
+
+ def test_injection_dynamic(self):
+ expected = None
+ template = self._setup_injected_network_scenario(should_inject=False)
+ self.assertEqual(expected, template)
+
+ def test_injection_static(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+"""
+ template = self._setup_injected_network_scenario()
+ self.assertEqual(expected, template)
+
+ def test_injection_static_no_gateway(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+"""
+ template = self._setup_injected_network_scenario(gateway=False)
+ self.assertEqual(expected, template)
+
+ def test_injection_static_no_dns(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+"""
+ template = self._setup_injected_network_scenario(dns=False)
+ self.assertEqual(expected, template)
+
+ def test_injection_static_ipv6(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+ address 1234:567::2
+ netmask 48
+ gateway 1234:567::1
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+ template = self._setup_injected_network_scenario(use_ipv6=True)
+ self.assertEqual(expected, template)
+
+ def test_injection_static_ipv6_no_gateway(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+ address 1234:567::2
+ netmask 48
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+ template = self._setup_injected_network_scenario(use_ipv6=True,
+ gateway=False)
+ self.assertEqual(expected, template)
+
+ def test_injection_static_with_ipv4_off(self):
+ expected = None
+ template = self._setup_injected_network_scenario(use_ipv4=False)
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_two_interfaces(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+ address 1234:567::2
+ netmask 48
+ gateway 1234:567::1
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+
+auto eth1
+iface eth1 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+iface eth1 inet6 static
+ address 1234:567::2
+ netmask 48
+ gateway 1234:567::1
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+ template = self._setup_injected_network_scenario(use_ipv6=True,
+ two_interfaces=True)
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_with_lxc(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+ post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+ post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+"""
+ template = self._setup_injected_network_scenario(
+ use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_with_lxc_no_gateway(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+"""
+ template = self._setup_injected_network_scenario(
+ use_ipv6=True, gateway=False, two_interfaces=True,
+ libvirt_virt_type='lxc')
+ self.assertEqual(expected, template)
diff --git a/nova/tests/unit/network/test_neutronv2.py b/nova/tests/unit/network/test_neutronv2.py
new file mode 100644
index 0000000000..a34c8cc899
--- /dev/null
+++ b/nova/tests/unit/network/test_neutronv2.py
@@ -0,0 +1,3194 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import collections
+import contextlib
+import copy
+import uuid
+
+import mock
+import mox
+from neutronclient.common import exceptions
+from neutronclient.v2_0 import client
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import six
+
+from nova.compute import flavors
+from nova import context
+from nova import exception
+from nova.network import model
+from nova.network import neutronv2
+from nova.network.neutronv2 import api as neutronapi
+from nova.network.neutronv2 import constants
+from nova import objects
+from nova.openstack.common import policy as common_policy
+from nova.pci import manager as pci_manager
+from nova.pci import whitelist as pci_whitelist
+from nova import policy
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+
+CONF = cfg.CONF
+
+# NOTE: Neutron client raises Exception which is discouraged by HACKING.
+# We set this variable here and use it for assertions below to avoid
+# the hacking checks until we can make neutron client throw a custom
+# exception class instead.
+NEUTRON_CLIENT_EXCEPTION = Exception
+
+
+class MyComparator(mox.Comparator):
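+    """Comparator that matches dicts, lists and tuples by content.
+
+    List and tuple elements are matched by membership rather than position.
+    """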
+ def __init__(self, lhs):
+ self.lhs = lhs
+
+ def _com_dict(self, lhs, rhs):
+ if len(lhs) != len(rhs):
+ return False
+ for key, value in lhs.iteritems():
+ if key not in rhs:
+ return False
+ rhs_value = rhs[key]
+ if not self._com(value, rhs_value):
+ return False
+ return True
+
+ def _com_list(self, lhs, rhs):
+ if len(lhs) != len(rhs):
+ return False
+ for lhs_value in lhs:
+ if lhs_value not in rhs:
+ return False
+ return True
+
+ def _com(self, lhs, rhs):
+ if lhs is None:
+ return rhs is None
+ if isinstance(lhs, dict):
+ if not isinstance(rhs, dict):
+ return False
+ return self._com_dict(lhs, rhs)
+ if isinstance(lhs, list):
+ if not isinstance(rhs, list):
+ return False
+ return self._com_list(lhs, rhs)
+ if isinstance(lhs, tuple):
+ if not isinstance(rhs, tuple):
+ return False
+ return self._com_list(lhs, rhs)
+ return lhs == rhs
+
+ def equals(self, rhs):
+ return self._com(self.lhs, rhs)
+
+ def __repr__(self):
+ return str(self.lhs)
+
+
+class TestNeutronClient(test.TestCase):
+ def test_withtoken(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token')
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_strategy=CONF.neutron.auth_strategy,
+ endpoint_url=CONF.neutron.url,
+ token=my_context.auth_token,
+ timeout=CONF.neutron.url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+ neutronv2.get_client(my_context)
+
+ def test_withouttoken(self):
+ my_context = context.RequestContext('userid', 'my_tenantid')
+ self.assertRaises(exceptions.Unauthorized,
+ neutronv2.get_client,
+ my_context)
+
+ def test_withtoken_context_is_admin(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token',
+ is_admin=True)
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_strategy=CONF.neutron.auth_strategy,
+ endpoint_url=CONF.neutron.url,
+ token=my_context.auth_token,
+ timeout=CONF.neutron.url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+        # Note that although the context is marked as admin, we are not
+        # asking for an admin client, so we authenticate with our own
+        # token.
+ neutronv2.get_client(my_context)
+
+ def test_withouttoken_keystone_connection_error(self):
+ self.flags(auth_strategy='keystone', group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ my_context = context.RequestContext('userid', 'my_tenantid')
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ neutronv2.get_client,
+ my_context)
+
+ def test_reuse_admin_token(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = 'new_token'
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ with contextlib.nested(
+ mock.patch.object(client.Client, "list_networks",
+ side_effect=mock.Mock),
+ mock.patch.object(client.Client, 'get_auth_info',
+ return_value={'auth_token': 'new_token1'}),
+ ):
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+
+ def test_admin_token_updated(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = 'new_token'
+ tokens = [{'auth_token': 'new_token1'}, {'auth_token': 'new_token'}]
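+        # get_auth_info pops tokens from the end of the list, so the first
+        # call returns the already-cached 'new_token' and the second
+        # returns 'new_token1'.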
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ with contextlib.nested(
+ mock.patch.object(client.Client, "list_networks",
+ side_effect=mock.Mock),
+ mock.patch.object(client.Client, 'get_auth_info',
+ side_effect=tokens.pop),
+ ):
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token', token_store.admin_auth_token)
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+
+
+class TestNeutronv2Base(test.TestCase):
+
+ def setUp(self):
+ super(TestNeutronv2Base, self).setUp()
+ self.context = context.RequestContext('userid', 'my_tenantid')
+ setattr(self.context,
+ 'auth_token',
+ 'bff4a5a6b9eb4ea2a6efec6eefb77936')
+ self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
+ 'uuid': str(uuid.uuid4()),
+ 'display_name': 'test_instance',
+ 'availability_zone': 'nova',
+ 'host': 'some_host',
+ 'security_groups': []}
+ self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
+ 'uuid': str(uuid.uuid4()),
+ 'display_name': 'test_instance2',
+ 'availability_zone': 'nova',
+ 'security_groups': []}
+ self.nets1 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'my_tenantid'}]
+ self.nets2 = []
+ self.nets2.append(self.nets1[0])
+ self.nets2.append({'id': 'my_netid2',
+ 'name': 'my_netname2',
+ 'subnets': ['mysubnid2'],
+ 'tenant_id': 'my_tenantid'})
+ self.nets3 = self.nets2 + [{'id': 'my_netid3',
+ 'name': 'my_netname3',
+ 'tenant_id': 'my_tenantid'}]
+ self.nets4 = [{'id': 'his_netid4',
+ 'name': 'his_netname4',
+ 'tenant_id': 'his_tenantid'}]
+ # A network request with external networks
+ self.nets5 = self.nets1 + [{'id': 'the-external-one',
+ 'name': 'out-of-this-world',
+ 'router:external': True,
+ 'tenant_id': 'should-be-an-admin'}]
+ # A network request with a duplicate
+ self.nets6 = []
+ self.nets6.append(self.nets1[0])
+ self.nets6.append(self.nets1[0])
+ # A network request with a combo
+ self.nets7 = []
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+ # A network request with only external network
+ self.nets8 = [self.nets5[1]]
+
+ self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
+ self.nets5, self.nets6, self.nets7, self.nets8]
+
+ self.port_address = '10.0.1.2'
+ self.port_data1 = [{'network_id': 'my_netid1',
+ 'device_id': self.instance2['uuid'],
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid1',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'fixed_ips': [{'ip_address': self.port_address,
+ 'subnet_id': 'my_subid1'}],
+ 'mac_address': 'my_mac1', }]
+ self.float_data1 = [{'port_id': 'my_portid1',
+ 'fixed_ip_address': self.port_address,
+ 'floating_ip_address': '172.0.1.2'}]
+ self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
+ 'subnet_id': 'my_subid1'}],
+ 'status': 'ACTIVE',
+ 'admin_state_up': True}]
+ self.port_address2 = '10.0.2.2'
+ self.port_data2 = []
+ self.port_data2.append(self.port_data1[0])
+ self.port_data2.append({'network_id': 'my_netid2',
+ 'device_id': self.instance['uuid'],
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid2',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'fixed_ips':
+ [{'ip_address': self.port_address2,
+ 'subnet_id': 'my_subid2'}],
+ 'mac_address': 'my_mac2', })
+ self.float_data2 = []
+ self.float_data2.append(self.float_data1[0])
+ self.float_data2.append({'port_id': 'my_portid2',
+ 'fixed_ip_address': '10.0.2.2',
+ 'floating_ip_address': '172.0.2.2'})
+ self.port_data3 = [{'network_id': 'my_netid1',
+ 'device_id': 'device_id3',
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid3',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'fixed_ips': [], # no fixed ip
+ 'mac_address': 'my_mac3', }]
+ self.subnet_data1 = [{'id': 'my_subid1',
+ 'cidr': '10.0.1.0/24',
+ 'network_id': 'my_netid1',
+ 'gateway_ip': '10.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
+ self.subnet_data2 = []
+ self.subnet_data_n = [{'id': 'my_subid1',
+ 'cidr': '10.0.1.0/24',
+ 'network_id': 'my_netid1',
+ 'gateway_ip': '10.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
+ {'id': 'my_subid2',
+ 'cidr': '20.0.1.0/24',
+ 'network_id': 'my_netid2',
+ 'gateway_ip': '20.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
+ self.subnet_data2.append({'id': 'my_subid2',
+ 'cidr': '10.0.2.0/24',
+ 'network_id': 'my_netid2',
+ 'gateway_ip': '10.0.2.1',
+ 'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
+
+ self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
+ 'name': 'ext_net',
+ 'router:external': True,
+ 'tenant_id': 'admin_tenantid'}
+ self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
+ 'name': 'nova',
+ 'router:external': True,
+ 'tenant_id': 'admin_tenantid'}
+ self.fip_unassociated = {'tenant_id': 'my_tenantid',
+ 'id': 'fip_id1',
+ 'floating_ip_address': '172.24.4.227',
+ 'floating_network_id': self.fip_pool['id'],
+ 'port_id': None,
+ 'fixed_ip_address': None,
+ 'router_id': None}
+ fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
+ self.fip_associated = {'tenant_id': 'my_tenantid',
+ 'id': 'fip_id2',
+ 'floating_ip_address': '172.24.4.228',
+ 'floating_network_id': self.fip_pool['id'],
+ 'port_id': self.port_data2[1]['id'],
+ 'fixed_ip_address': fixed_ip_address,
+ 'router_id': 'router_id1'}
+ self._returned_nw_info = []
+ self.mox.StubOutWithMock(neutronv2, 'get_client')
+ self.moxed_client = self.mox.CreateMock(client.Client)
+ self.addCleanup(CONF.reset)
+ self.addCleanup(self.mox.VerifyAll)
+ self.addCleanup(self.mox.UnsetStubs)
+ self.addCleanup(self.stubs.UnsetAll)
+
+ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ self.instance2 = fake_instance.fake_instance_obj(self.context,
+ **self.instance2)
+
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, 'get_instance_nw_info')
+ has_portbinding = False
+ has_extra_dhcp_opts = False
+ dhcp_options = kwargs.get('dhcp_options')
+ if dhcp_options is not None:
+ has_extra_dhcp_opts = True
+
+ if kwargs.get('portbinding'):
+ has_portbinding = True
+ api.extensions[constants.PORTBINDING_EXT] = 1
+ self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ neutronv2.get_client(
+ mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client,
+ refresh_cache=True).AndReturn(has_portbinding)
+ else:
+ self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
+ self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+ # Net idx is 1-based for compatibility with existing unit tests
+ nets = self.nets[net_idx - 1]
+ ports = {}
+ fixed_ips = {}
+ macs = kwargs.get('macs')
+ if macs:
+ macs = set(macs)
+ req_net_ids = []
+ ordered_networks = []
+ port = {}
+ if 'requested_networks' in kwargs:
+ for request in kwargs['requested_networks']:
+ if request.port_id:
+ if request.port_id == 'my_portid3':
+ self.moxed_client.show_port(request.port_id
+ ).AndReturn(
+ {'port': {'id': 'my_portid3',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports['my_netid1'] = [self.port_data1[0],
+ self.port_data3[0]]
+ ports[request.port_id] = self.port_data3[0]
+ request.network_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
+ else:
+ self.moxed_client.show_port(request.port_id).AndReturn(
+ {'port': {'id': 'my_portid1',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports[request.port_id] = self.port_data1[0]
+ request.network_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
+ else:
+ fixed_ips[request.network_id] = request.address
+ req_net_ids.append(request.network_id)
+ ordered_networks.append(request)
+ else:
+ for n in nets:
+ ordered_networks.append(
+ objects.NetworkRequest(network_id=n['id']))
+ if kwargs.get('_break') == 'pre_list_networks':
+ self.mox.ReplayAll()
+ return api
+ # search all req_net_ids as in api.py
+ search_ids = req_net_ids
+ if search_ids:
+ mox_list_params = {'id': mox.SameElementsAs(search_ids)}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ else:
+ mox_list_params = {'tenant_id': self.instance.project_id,
+ 'shared': False}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ mox_list_params = {'shared': True}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': []})
+
+ if (('requested_networks' not in kwargs or
+ kwargs['requested_networks'].as_tuples() == [(None, None, None)])
+ and len(nets) > 1):
+ self.mox.ReplayAll()
+ return api
+
+ ports_in_requested_net_order = []
+ nets_in_requested_net_order = []
+ for request in ordered_networks:
+ port_req_body = {
+ 'port': {
+ 'device_id': self.instance.uuid,
+ 'device_owner': 'compute:nova',
+ },
+ }
+ # Network lookup for available network_id
+ network = None
+ for net in nets:
+ if net['id'] == request.network_id:
+ network = net
+ break
+ # If the net_id did not pass validate_networks() and is not
+ # available here, skip it safely rather than continuing with a
+ # None network.
+ else:
+ continue
+ if has_portbinding:
+ port_req_body['port']['binding:host_id'] = (
+ self.instance.get('host'))
+ if not has_portbinding:
+ api._populate_neutron_extension_values(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(),
+ mox.IgnoreArg(), neutron=self.moxed_client).AndReturn(None)
+ else:
+ # since _populate_neutron_extension_values() will call
+ # _has_port_binding_extension()
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client).\
+ AndReturn(has_portbinding)
+ if request.port_id:
+ port = ports[request.port_id]
+ self.moxed_client.update_port(request.port_id,
+ MyComparator(port_req_body)
+ ).AndReturn(
+ {'port': port})
+ ports_in_requested_net_order.append(request.port_id)
+ else:
+ request.address = fixed_ips.get(request.network_id)
+ if request.address:
+ port_req_body['port']['fixed_ips'] = [{'ip_address':
+ request.address}]
+ port_req_body['port']['network_id'] = request.network_id
+ port_req_body['port']['admin_state_up'] = True
+ port_req_body['port']['tenant_id'] = \
+ self.instance.project_id
+ if macs:
+ port_req_body['port']['mac_address'] = macs.pop()
+ if has_portbinding:
+ port_req_body['port']['binding:host_id'] = (
+ self.instance.get('host'))
+ res_port = {'port': {'id': 'fake'}}
+ if has_extra_dhcp_opts:
+ port_req_body['port']['extra_dhcp_opts'] = dhcp_options
+ if kwargs.get('_break') == 'mac' + request.network_id:
+ self.mox.ReplayAll()
+ return api
+ self.moxed_client.create_port(
+ MyComparator(port_req_body)).AndReturn(res_port)
+ ports_in_requested_net_order.append(res_port['port']['id'])
+
+ nets_in_requested_net_order.append(network)
+
+ api.get_instance_nw_info(mox.IgnoreArg(),
+ self.instance,
+ networks=nets_in_requested_net_order,
+ port_ids=ports_in_requested_net_order,
+ admin_client=None
+ ).AndReturn(self._returned_nw_info)
+ self.mox.ReplayAll()
+ return api
+
+ def _verify_nw_info(self, nw_inf, index=0):
+ id_suffix = index + 1
+ self.assertEqual('10.0.%s.2' % id_suffix,
+ nw_inf.fixed_ips()[index]['address'])
+ self.assertEqual('172.0.%s.2' % id_suffix,
+ nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
+ self.assertEqual('my_netname%s' % id_suffix,
+ nw_inf[index]['network']['label'])
+ self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
+ self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
+ self.assertEqual('10.0.%s.0/24' % id_suffix,
+ nw_inf[index]['network']['subnets'][0]['cidr'])
+
+ ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
+ version=4, type='dns')
+ self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
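+ # For example, index=0 checks fixed IP 10.0.1.2, floating IP
+ # 172.0.1.2, label my_netname1, port my_portid1, MAC my_mac1 and
+ # CIDR 10.0.1.0/24.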
+
+ def _get_instance_nw_info(self, number):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(mox.IgnoreArg(),
+ self.instance['uuid'],
+ mox.IgnoreArg())
+ port_data = number == 1 and self.port_data1 or self.port_data2
+ nets = number == 1 and self.nets1 or self.nets2
+ net_info_cache = []
+ for port in port_data:
+ net_info_cache.append({"network": {"id": port['network_id']},
+ "id": port['id']})
+
+ instance = copy.copy(self.instance)
+ # Intentionally do not wrap net_info_cache in jsonutils.dumps() here,
+ # in order to exercise the code path where the cached value is not
+ # unicode.
+ instance['info_cache'] = {'network_info': net_info_cache}
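+ # The unicode path is exercised by tests below that store
+ # six.text_type(jsonutils.dumps(net_info_cache)) instead.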
+
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': port_data})
+ net_ids = [port['network_id'] for port in port_data]
+ nets = number == 1 and self.nets1 or self.nets2
+ self.moxed_client.list_networks(
+ id=net_ids).AndReturn({'networks': nets})
+ for i in xrange(1, number + 1):
+ float_data = number == 1 and self.float_data1 or self.float_data2
+ for ip in port_data[i - 1]['fixed_ips']:
+ float_data = [x for x in float_data
+ if x['fixed_ip_address'] == ip['ip_address']]
+ self.moxed_client.list_floatingips(
+ fixed_ip_address=ip['ip_address'],
+ port_id=port_data[i - 1]['id']).AndReturn(
+ {'floatingips': float_data})
+ subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
+ self.moxed_client.list_subnets(
+ id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
+ {'subnets': subnet_data})
+ self.moxed_client.list_ports(
+ network_id=subnet_data[0]['network_id'],
+ device_owner='network:dhcp').AndReturn(
+ {'ports': []})
+ self.mox.ReplayAll()
+ nw_inf = api.get_instance_nw_info(self.context, instance)
+ for i in xrange(0, number):
+ self._verify_nw_info(nw_inf, i)
+
+ def _allocate_for_instance(self, net_idx=1, **kwargs):
+ api = self._stub_allocate_for_instance(net_idx, **kwargs)
+ return api.allocate_for_instance(self.context, self.instance, **kwargs)
+
+
+class TestNeutronv2(TestNeutronv2Base):
+
+ def setUp(self):
+ super(TestNeutronv2, self).setUp()
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ def test_get_instance_nw_info_1(self):
+ # Test to get one port in one network and subnet.
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self._get_instance_nw_info(1)
+
+ def test_get_instance_nw_info_2(self):
+ # Test to get one port in each of two networks and subnets.
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self._get_instance_nw_info(2)
+
+ def test_get_instance_nw_info_with_nets_add_interface(self):
+ # This tests that adding an interface to an instance does not
+ # remove the first (existing) interface from the instance.
+ network_model = model.Network(id='network_id',
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': self.port_data2[0]['id'],
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ self.port_data2,
+ self.nets2,
+ [self.port_data2[1]['id']])
+
+ def test_get_instance_nw_info_remove_ports_from_neutron(self):
+ # This tests that when a port is removed in neutron it
+ # is also removed from the nova network cache.
+ network_model = model.Network(id=self.port_data2[0]['network_id'],
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': 'network_id',
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ self.port_data2,
+ None,
+ None)
+
+ def test_get_instance_nw_info_ignores_neutron_ports(self):
+ # Tests that only ports in the network_cache are updated
+ # and ports returned from neutron that match the same
+ # instance_id/device_id are ignored.
+ port_data2 = copy.copy(self.port_data2)
+
+ # set device_id on the ports to be the same.
+ port_data2[1]['device_id'] = port_data2[0]['device_id']
+ network_model = model.Network(id='network_id',
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': 'network_id',
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ port_data2,
+ None,
+ None)
+
+ def _fake_get_instance_nw_info_helper(self, network_cache,
+ current_neutron_ports,
+ networks=None, port_ids=None):
+ """Helper function to test get_instance_nw_info.
+
+ :param network_cache - data already in the nova network cache.
+ :param current_neutron_ports - updated list of ports from neutron.
+ :param networks - networks of ports being added to instance.
+ :param port_ids - new ports being added to instance.
+ """
+
+ # keep a copy of the original ports/networks to pass to
+ # get_instance_nw_info() as the code below changes them.
+ original_port_ids = copy.copy(port_ids)
+ original_networks = copy.copy(networks)
+
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(
+ mox.IgnoreArg(),
+ self.instance['uuid'], mox.IgnoreArg())
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': current_neutron_ports})
+
+ ifaces = network_cache['info_cache']['network_info']
+
+ if port_ids is None:
+ port_ids = [iface['id'] for iface in ifaces]
+ net_ids = [iface['network']['id'] for iface in ifaces]
+ nets = [{'id': iface['network']['id'],
+ 'name': iface['network']['label'],
+ 'tenant_id': iface['network']['meta']['tenant_id']}
+ for iface in ifaces]
+ if networks is None:
+ self.moxed_client.list_networks(
+ id=net_ids).AndReturn({'networks': nets})
+ else:
+ networks = networks + [
+ dict(id=iface['network']['id'],
+ name=iface['network']['label'],
+ tenant_id=iface['network']['meta']['tenant_id'])
+ for iface in ifaces]
+ port_ids = [iface['id'] for iface in ifaces] + port_ids
+
+ index = 0
+
+ current_neutron_port_map = {}
+ for current_neutron_port in current_neutron_ports:
+ current_neutron_port_map[current_neutron_port['id']] = (
+ current_neutron_port)
+ for port_id in port_ids:
+ current_neutron_port = current_neutron_port_map.get(port_id)
+ if current_neutron_port:
+ for ip in current_neutron_port['fixed_ips']:
+ self.moxed_client.list_floatingips(
+ fixed_ip_address=ip['ip_address'],
+ port_id=current_neutron_port['id']).AndReturn(
+ {'floatingips': [self.float_data2[index]]})
+ self.moxed_client.list_subnets(
+ id=mox.SameElementsAs([ip['subnet_id']])
+ ).AndReturn(
+ {'subnets': [self.subnet_data_n[index]]})
+ self.moxed_client.list_ports(
+ network_id=current_neutron_port['network_id'],
+ device_owner='network:dhcp').AndReturn(
+ {'ports': self.dhcp_port_data1})
+ index += 1
+ self.mox.ReplayAll()
+
+ self.instance['info_cache'] = network_cache
+ instance = copy.copy(self.instance)
+ instance['info_cache'] = network_cache['info_cache']
+ nw_infs = api.get_instance_nw_info(self.context,
+ instance,
+ networks=original_networks,
+ port_ids=original_port_ids)
+
+ self.assertEqual(index, len(nw_infs))
+ # ensure that nic ordering is preserved
+ for iface_index in range(index):
+ self.assertEqual(nw_infs[iface_index]['id'],
+ port_ids[iface_index])
+
+ def test_get_instance_nw_info_without_subnet(self):
+ # Test get instance_nw_info for a port without subnet.
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(
+ mox.IgnoreArg(),
+ self.instance['uuid'], mox.IgnoreArg())
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': self.port_data3})
+ self.moxed_client.list_networks(
+ id=[self.port_data1[0]['network_id']]).AndReturn(
+ {'networks': self.nets1})
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ net_info_cache = []
+ for port in self.port_data3:
+ net_info_cache.append({"network": {"id": port['network_id']},
+ "id": port['id']})
+ instance = copy.copy(self.instance)
+ instance['info_cache'] = {'network_info':
+ six.text_type(
+ jsonutils.dumps(net_info_cache))}
+
+ self.mox.ReplayAll()
+
+ nw_inf = api.get_instance_nw_info(self.context,
+ instance)
+
+ id_suffix = 3
+ self.assertEqual(0, len(nw_inf.fixed_ips()))
+ self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
+ self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
+ self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
+ self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
+
+ def test_refresh_neutron_extensions_cache(self):
+ api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.QOS_QUEUE}]})
+ self.mox.ReplayAll()
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg())
+ self.assertEqual(
+ {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
+ api.extensions)
+
+ def test_populate_neutron_extension_values_rxtx_factor(self):
+ api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.QOS_QUEUE}]})
+ self.mox.ReplayAll()
+ flavor = flavors.get_default_flavor()
+ flavor['rxtx_factor'] = 1
+ sys_meta = utils.dict_to_metadata(
+ flavors.save_flavor_info({}, flavor))
+ instance = {'system_metadata': sys_meta}
+ port_req_body = {'port': {}}
+ api._populate_neutron_extension_values(self.context, instance,
+ None, port_req_body)
+ self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
+
+ def test_allocate_for_instance_1(self):
+ # Allocate one port in one network env.
+ self._allocate_for_instance(1)
+
+ def test_allocate_for_instance_2(self):
+ # Allocate one port in two networks env.
+ api = self._stub_allocate_for_instance(net_idx=2)
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted as a set; the
+ # _allocate_for_instance helper checks that the mac is used to create
+ # a port.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
+ def test_allocate_for_instance_accepts_only_portid(self):
+ # Make sure allocate_for_instance works when only a portid is provided
+ self._returned_nw_info = self.port_data1
+ result = self._allocate_for_instance(
+ requested_networks=objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')]))
+ self.assertEqual(self.port_data1, result)
+
+ def test_allocate_for_instance_not_enough_macs_via_ports(self):
+ # Using a hypervisor MAC via a pre-created port stops that MAC from
+ # being used to dynamically create a port on a network. We put the
+ # network first in requested_networks so that, if the code did not
+ # pre-check requested ports, it would incorrectly assign the MAC and
+ # not fail.
+ requested_networks = objects.NetworkRequestList(
+ objects=[
+ objects.NetworkRequest(network_id=self.nets2[1]['id']),
+ objects.NetworkRequest(port_id='my_portid1')])
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac1']),
+ _break='mac' + self.nets2[1]['id'])
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['my_mac1']))
+
+ def test_allocate_for_instance_not_enough_macs(self):
+ # If not enough MAC addresses are available to allocate to networks, an
+ # error should be raised.
+ # We could pass in macs=set(), but that wouldn't tell us that
+ # allocate_for_instance tracks used macs properly, so we pass in one
+ # mac, and ask for two networks.
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
+ objects.NetworkRequest(network_id=self.nets2[0]['id'])])
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac2']),
+ _break='mac' + self.nets2[0]['id'])
+ with mock.patch.object(api, '_delete_ports'):
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance,
+ requested_networks=requested_networks,
+ macs=set(['my_mac2']))
+
+ def test_allocate_for_instance_two_macs_two_networks(self):
+ # If two MACs are available and two networks requested, two new ports
+ # get made and no exceptions raised.
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
+ objects.NetworkRequest(network_id=self.nets2[0]['id'])])
+ self._allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac2', 'my_mac1']))
+
+ def test_allocate_for_instance_mac_conflicting_requested_port(self):
+ # request a pre-created port whose MAC is not in the available set
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ api = self._stub_allocate_for_instance(
+ net_idx=1, requested_networks=requested_networks,
+ macs=set(['unknown:mac']),
+ _break='pre_list_networks')
+ self.assertRaises(exception.PortNotUsable,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['unknown:mac']))
+
+ def test_allocate_for_instance_without_requested_networks(self):
+ api = self._stub_allocate_for_instance(net_idx=3)
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_with_requested_non_available_network(self):
+ """verify that a non available network is ignored.
+ self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
+ Do not create a port on a non available network self.nets3[2].
+ """
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=net['id'])
+ for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
+ self._allocate_for_instance(net_idx=2,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_requested_networks(self):
+ # specify the requested networks in a non-default order
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=net['id'])
+ for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
+ self._allocate_for_instance(net_idx=3,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
+ # specify a single network together with a fixed IP address
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
+ address='10.0.1.0')])
+ self._allocate_for_instance(net_idx=1,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_requested_networks_with_port(self):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ self._allocate_for_instance(net_idx=1,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_no_networks(self):
+ """verify the exception thrown when there are no networks defined."""
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ api = neutronapi.API()
+ self.moxed_client.list_extensions().AndReturn({'extensions': []})
+ self.moxed_client.list_networks(
+ tenant_id=self.instance.project_id,
+ shared=False).AndReturn(
+ {'networks': model.NetworkInfo([])})
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': model.NetworkInfo([])})
+ self.mox.ReplayAll()
+ nwinfo = api.allocate_for_instance(self.context, self.instance)
+ self.assertEqual(len(nwinfo), 0)
+
+ def test_allocate_for_instance_ex1(self):
+ """verify we will delete created ports
+ if we fail to allocate all net resources.
+
+ Mox to raise exception when creating a second port.
+ In this case, the code should delete the first created port.
+ """
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client,
+ refresh_cache=True).AndReturn(False)
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=net['id'])
+ for net in (self.nets2[0], self.nets2[1])])
+ self.moxed_client.list_networks(
+ id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
+ index = 0
+ for network in self.nets2:
+ binding_port_req_body = {
+ 'port': {
+ 'device_id': self.instance.uuid,
+ 'device_owner': 'compute:nova',
+ },
+ }
+ port_req_body = {
+ 'port': {
+ 'network_id': network['id'],
+ 'admin_state_up': True,
+ 'tenant_id': self.instance.project_id,
+ },
+ }
+ port_req_body['port'].update(binding_port_req_body['port'])
+ port = {'id': 'portid_' + network['id']}
+
+ api._populate_neutron_extension_values(self.context,
+ self.instance, None, binding_port_req_body,
+ neutron=self.moxed_client).AndReturn(None)
+ if index == 0:
+ self.moxed_client.create_port(
+ MyComparator(port_req_body)).AndReturn({'port': port})
+ else:
+ NeutronOverQuota = exceptions.OverQuotaClient()
+ self.moxed_client.create_port(
+ MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
+ index += 1
+ self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
+ self.mox.ReplayAll()
+ self.assertRaises(exception.PortLimitExceeded,
+ api.allocate_for_instance,
+ self.context, self.instance,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_ex2(self):
+ """verify we have no port to delete
+ if we fail to allocate the first net resource.
+
+ Mox to raise exception when creating the first port.
+ In this case, the code should not delete any ports.
+ """
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client,
+ refresh_cache=True).AndReturn(False)
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=net['id'])
+ for net in (self.nets2[0], self.nets2[1])])
+ self.moxed_client.list_networks(
+ id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
+ binding_port_req_body = {
+ 'port': {
+ 'device_id': self.instance.uuid,
+ 'device_owner': 'compute:nova',
+ },
+ }
+ port_req_body = {
+ 'port': {
+ 'network_id': self.nets2[0]['id'],
+ 'admin_state_up': True,
+ 'device_id': self.instance.uuid,
+ 'tenant_id': self.instance.project_id,
+ },
+ }
+ api._populate_neutron_extension_values(self.context,
+ self.instance, None, binding_port_req_body,
+ neutron=self.moxed_client).AndReturn(None)
+ self.moxed_client.create_port(
+ MyComparator(port_req_body)).AndRaise(
+ Exception("fail to create port"))
+ self.mox.ReplayAll()
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
+ self.context, self.instance,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_no_port_or_network(self):
+ class BailOutEarly(Exception):
+ pass
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ api = neutronapi.API()
+ self.moxed_client.list_extensions().AndReturn({'extensions': []})
+ self.mox.StubOutWithMock(api, '_get_available_networks')
+ # Make sure we get an empty list and then bail out of the rest
+ # of the function
+ api._get_available_networks(self.context, self.instance.project_id,
+ [],
+ neutron=self.moxed_client).\
+ AndRaise(BailOutEarly)
+ self.mox.ReplayAll()
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest()])
+ self.assertRaises(BailOutEarly,
+ api.allocate_for_instance,
+ self.context, self.instance,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_second_time(self):
+ # Make sure that allocate_for_instance only returns ports that it
+ # allocated during _that_ run.
+ new_port = {'id': 'fake'}
+ self._returned_nw_info = self.port_data1 + [new_port]
+ nw_info = self._allocate_for_instance()
+ self.assertEqual(nw_info, [new_port])
+
+ def test_allocate_for_instance_port_in_use(self):
+ # If a port is already in use, an exception should be raised.
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ api = self._stub_allocate_for_instance(
+ requested_networks=requested_networks,
+ _break='pre_list_networks',
+ _device=True)
+ self.assertRaises(exception.PortInUse,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_externalnet_forbidden(self):
+ """Only one network is available, it's external, and the client
+ is unauthorized to use it.
+ """
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ self.moxed_client.list_extensions().AndReturn({'extensions': []})
+ # no networks in the tenant
+ self.moxed_client.list_networks(
+ tenant_id=self.instance.project_id,
+ shared=False).AndReturn(
+ {'networks': model.NetworkInfo([])})
+ # external network is shared
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': self.nets8})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ self.assertRaises(exception.ExternalNetworkAttachForbidden,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_with_externalnet_multiple(self):
+ """Multiple networks are available, one the client is authorized
+ to use, and an external one the client is unauthorized to use.
+ """
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ self.moxed_client.list_extensions().AndReturn({'extensions': []})
+ # network found in the tenant
+ self.moxed_client.list_networks(
+ tenant_id=self.instance.project_id,
+ shared=False).AndReturn(
+ {'networks': self.nets1})
+ # external network is shared
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': self.nets8})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ self.assertRaises(
+ exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_with_externalnet_admin_ctx(self):
+ """Only one network is available, it's external, and the client
+ is authorized.
+ """
+ admin_ctx = context.RequestContext('userid', 'my_tenantid',
+ is_admin=True)
+ api = self._stub_allocate_for_instance(net_idx=8)
+ api.allocate_for_instance(admin_ctx, self.instance)
+
+ def _deallocate_for_instance(self, number, requested_networks=None):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ api = neutronapi.API()
+ port_data = number == 1 and self.port_data1 or self.port_data2
+ ret_data = copy.deepcopy(port_data)
+ if requested_networks:
+ if isinstance(requested_networks, objects.NetworkRequestList):
+ # NOTE(danms): Temporary and transitional
+ with mock.patch('nova.utils.is_neutron', return_value=True):
+ requested_networks = requested_networks.as_tuples()
+ for net, fip, port, request_id in requested_networks:
+ ret_data.append({'network_id': net,
+ 'device_id': self.instance.uuid,
+ 'device_owner': 'compute:nova',
+ 'id': port,
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'fixed_ips': [],
+ 'mac_address': 'fake_mac', })
+ self.moxed_client.list_ports(
+ device_id=self.instance.uuid).AndReturn(
+ {'ports': ret_data})
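+ # Ports passed in via requested_networks are expected to be updated,
+ # while the instance's own ports are expected to be deleted.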
+ if requested_networks:
+ for net, fip, port, request_id in requested_networks:
+ self.moxed_client.update_port(port)
+ for port in reversed(port_data):
+ self.moxed_client.delete_port(port['id'])
+
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(self.context,
+ self.instance.uuid,
+ {'network_info': '[]'})
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ api.deallocate_for_instance(self.context, self.instance,
+ requested_networks=requested_networks)
+
+ def test_deallocate_for_instance_1_with_requested(self):
+ requested = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='fake-net',
+ address='1.2.3.4',
+ port_id='fake-port')])
+ # Test to deallocate in one port env.
+ self._deallocate_for_instance(1, requested_networks=requested)
+
+ def test_deallocate_for_instance_2_with_requested(self):
+ requested = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='fake-net',
+ address='1.2.3.4',
+ port_id='fake-port')])
+ # Test to deallocate in two ports env.
+ self._deallocate_for_instance(2, requested_networks=requested)
+
+ def test_deallocate_for_instance_1(self):
+ # Test to deallocate in one port env.
+ self._deallocate_for_instance(1)
+
+ def test_deallocate_for_instance_2(self):
+ # Test to deallocate in two ports env.
+ self._deallocate_for_instance(2)
+
+ def test_deallocate_for_instance_port_not_found(self):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ port_data = self.port_data1
+ self.moxed_client.list_ports(
+ device_id=self.instance.uuid).AndReturn(
+ {'ports': port_data})
+
+ NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+ for port in reversed(port_data):
+ self.moxed_client.delete_port(port['id']).AndRaise(
+ NeutronNotFound)
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ api.deallocate_for_instance(self.context, self.instance)
+
+ def _test_deallocate_port_for_instance(self, number):
+ port_data = number == 1 and self.port_data1 or self.port_data2
+ nets = number == 1 and self.nets1 or self.nets2
+ self.moxed_client.delete_port(port_data[0]['id'])
+
+ net_info_cache = []
+ for port in port_data:
+ net_info_cache.append({"network": {"id": port['network_id']},
+ "id": port['id']})
+ instance = copy.copy(self.instance)
+ instance['info_cache'] = {'network_info':
+ six.text_type(
+ jsonutils.dumps(net_info_cache))}
+ api = neutronapi.API()
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': port_data[1:]})
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+ net_ids = [port['network_id'] for port in port_data]
+ self.moxed_client.list_networks(id=net_ids).AndReturn(
+ {'networks': nets})
+ float_data = number == 1 and self.float_data1 or self.float_data2
+ for data in port_data[1:]:
+ for ip in data['fixed_ips']:
+ self.moxed_client.list_floatingips(
+ fixed_ip_address=ip['ip_address'],
+ port_id=data['id']).AndReturn(
+ {'floatingips': float_data[1:]})
+ for port in port_data[1:]:
+ self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
+
+ self.mox.ReplayAll()
+
+ nwinfo = api.deallocate_port_for_instance(self.context, instance,
+ port_data[0]['id'])
+ self.assertEqual(len(nwinfo), len(port_data[1:]))
+ if len(port_data) > 1:
+ self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
+
+ def test_deallocate_port_for_instance_1(self):
+ # Test to deallocate the first and only port
+ self._test_deallocate_port_for_instance(1)
+
+ def test_deallocate_port_for_instance_2(self):
+ # Test to deallocate the first port of two
+ self._test_deallocate_port_for_instance(2)
+
+ def test_list_ports(self):
+ search_opts = {'parm': 'value'}
+ self.moxed_client.list_ports(**search_opts)
+ self.mox.ReplayAll()
+ neutronapi.API().list_ports(self.context, **search_opts)
+
+ def test_show_port(self):
+ self.moxed_client.show_port('foo')
+ self.mox.ReplayAll()
+ neutronapi.API().show_port(self.context, 'foo')
+
+ def test_validate_networks(self):
+ requested_networks = [('my_netid1', None, None, None),
+ ('my_netid2', None, None, None)]
+ ids = ['my_netid1', 'my_netid2']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets2})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': []})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 50}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_validate_networks_without_port_quota_on_network_side(self):
+ requested_networks = [('my_netid1', None, None, None),
+ ('my_netid2', None, None, None)]
+ ids = ['my_netid1', 'my_netid2']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets2})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': []})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_validate_networks_ex_1(self):
+ requested_networks = [('my_netid1', None, None, None)]
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(['my_netid1'])).AndReturn(
+ {'networks': self.nets1})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': []})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 50}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ try:
+ api.validate_networks(self.context, requested_networks, 1)
+ except exception.NetworkNotFound as ex:
+ self.assertIn("my_netid2", six.text_type(ex))
+
+ def test_validate_networks_ex_2(self):
+ requested_networks = [('my_netid1', None, None, None),
+ ('my_netid2', None, None, None),
+ ('my_netid3', None, None, None)]
+ ids = ['my_netid1', 'my_netid2', 'my_netid3']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets1})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ try:
+ api.validate_networks(self.context, requested_networks, 1)
+ except exception.NetworkNotFound as ex:
+ self.assertIn("my_netid2, my_netid3", six.text_type(ex))
+
+ def test_validate_networks_duplicate_disable(self):
+ """Verify that the correct exception is thrown when duplicate
+ network ids are passed to validate_networks, when nova config flag
+ allow_duplicate_networks is set to its default value: False
+ """
+ requested_networks = [('my_netid1', None, None, None),
+ ('my_netid1', None, None, None)]
+ self.mox.ReplayAll()
+ # Expected call from setUp.
+ neutronv2.get_client(None)
+ api = neutronapi.API()
+ self.assertRaises(exception.NetworkDuplicated,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_duplicate_enable(self):
+ """Verify that no duplicateNetworks exception is thrown when duplicate
+ network ids are passed to validate_networks, when nova config flag
+ allow_duplicate_networks is set to its non default value: True
+ """
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid1'),
+ objects.NetworkRequest(network_id='my_netid1')])
+ ids = ['my_netid1', 'my_netid1']
+
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets1})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': []})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 50}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_allocate_for_instance_with_requested_networks_duplicates(self):
+ # specify a duplicate network to allocate to instance
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=net['id'])
+ for net in (self.nets6[0], self.nets6[1])])
+ self._allocate_for_instance(net_idx=6,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_requested_networks_duplicates_port(self):
+ # specify first port and last port that are in same network
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port['id'])
+ for port in (self.port_data1[0], self.port_data3[0])])
+ self._allocate_for_instance(net_idx=6,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_requested_networks_duplicates_combo(self):
+ # specify a combo net_idx=7 : net2, port in net1, net2, port in net1
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid2'),
+ objects.NetworkRequest(port_id=self.port_data1[0]['id']),
+ objects.NetworkRequest(network_id='my_netid2'),
+ objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
+ self._allocate_for_instance(net_idx=7,
+ requested_networks=requested_networks)
+
+ def test_validate_networks_not_specified(self):
+ requested_networks = objects.NetworkRequestList(objects=[])
+ self.moxed_client.list_networks(
+ tenant_id=self.context.project_id,
+ shared=False).AndReturn(
+ {'networks': self.nets1})
+ self.moxed_client.list_networks(
+ shared=True).AndReturn(
+ {'networks': self.nets2})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_port_not_found(self):
+ # Verify that the correct exception is raised when a non-existent
+ # port is passed to validate_networks.
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(
+ network_id='my_netid1',
+ port_id='3123-ad34-bc43-32332ca33e')])
+
+ NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+ self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
+ NeutronNotFound)
+ self.mox.ReplayAll()
+ # Expected call from setUp.
+ neutronv2.get_client(None)
+ api = neutronapi.API()
+ self.assertRaises(exception.PortNotFound,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_port_show_raises_non404(self):
+ # Verify that a non-404 error from showing the port is re-raised
+ # by validate_networks.
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(
+ network_id='my_netid1',
+ port_id='3123-ad34-bc43-32332ca33e')])
+
+ NeutronNotFound = exceptions.NeutronClientException(status_code=0)
+ self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
+ NeutronNotFound)
+ self.mox.ReplayAll()
+ # Expected call from setUp.
+ neutronv2.get_client(None)
+ api = neutronapi.API()
+ self.assertRaises(exceptions.NeutronClientException,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_port_in_use(self):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
+ self.moxed_client.show_port(self.port_data3[0]['id']).\
+ AndReturn({'port': self.port_data3[0]})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ self.assertRaises(exception.PortInUse,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_port_no_subnet_id(self):
+ port_a = self.port_data3[0]
+ port_a['device_id'] = None
+ port_a['device_owner'] = None
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_a['id'])])
+ self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ self.assertRaises(exception.PortRequiresFixedIP,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_no_subnet_id(self):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='his_netid4')])
+ ids = ['his_netid4']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets4})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ self.assertRaises(exception.NetworkRequiresSubnet,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_ports_in_same_network_disable(self):
+ """Verify that duplicateNetworks exception is thrown when ports on same
+ duplicate network are passed to validate_networks, when nova config
+ flag allow_duplicate_networks is set to its default False
+ """
+ self.flags(allow_duplicate_networks=False, group='neutron')
+ port_a = self.port_data3[0]
+ port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+ 'subnet_id': 'subnet_id'}
+ port_b = self.port_data1[0]
+ self.assertEqual(port_a['network_id'], port_b['network_id'])
+ for port in [port_a, port_b]:
+ port['device_id'] = None
+ port['device_owner'] = None
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_a['id']),
+ objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_a['id']).AndReturn(
+ {'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn(
+ {'port': port_b})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ self.assertRaises(exception.NetworkDuplicated,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_ports_in_same_network_enable(self):
+ """Verify that duplicateNetworks exception is not thrown when ports
+ on same duplicate network are passed to validate_networks, when nova
+ config flag allow_duplicate_networks is set to its True
+ """
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ port_a = self.port_data3[0]
+ port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+ 'subnet_id': 'subnet_id'}
+ port_b = self.port_data1[0]
+ self.assertEqual(port_a['network_id'], port_b['network_id'])
+ for port in [port_a, port_b]:
+ port['device_id'] = None
+ port['device_owner'] = None
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_a['id']),
+ objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_a['id']).AndReturn(
+ {'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn(
+ {'port': port_b})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_validate_networks_ports_not_in_same_network(self):
+ port_a = self.port_data3[0]
+ port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+ 'subnet_id': 'subnet_id'}
+ port_b = self.port_data2[1]
+ self.assertNotEqual(port_a['network_id'], port_b['network_id'])
+ for port in [port_a, port_b]:
+ port['device_id'] = None
+ port['device_owner'] = None
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_a['id']),
+ objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_validate_networks_no_quota(self):
+ # Test validation for a request for one instance needing
+ # two ports, where the quota is 2 and 2 ports are in use
+ # => instances which can be created = 0
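+ # (presumably (quota - ports_in_use) // ports_per_instance =
+ # (2 - 2) // 2 = 0)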
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid1'),
+ objects.NetworkRequest(network_id='my_netid2')])
+ ids = ['my_netid1', 'my_netid2']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets2})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': self.port_data2})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 2}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 1)
+ self.assertEqual(max_count, 0)
+
+ def test_validate_networks_with_ports_and_networks(self):
+ # Test validation for a request for one instance needing
+ # one port allocated via nova with another port being passed in.
+ port_b = self.port_data2[1]
+ port_b['device_id'] = None
+ port_b['device_owner'] = None
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid1'),
+ objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+ ids = ['my_netid1']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets1})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': self.port_data2})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 5}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 1)
+ self.assertEqual(max_count, 1)
+
+ def test_validate_networks_one_port_and_no_networks(self):
+ # Test that show_quota is not called if only ports, and no
+ # networks, are passed in.
+ port_b = self.port_data2[1]
+ port_b['device_id'] = None
+ port_b['device_owner'] = None
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 1)
+ self.assertEqual(max_count, 1)
+
+ def test_validate_networks_some_quota(self):
+ # Test validation for a request for two instances needing
+ # two ports each, where the quota is 5 and 2 ports are in use
+ # => instances which can be created = 1
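+ # (presumably (quota - ports_in_use) // ports_per_instance =
+ # (5 - 2) // 2 = 1)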
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid1'),
+ objects.NetworkRequest(network_id='my_netid2')])
+ ids = ['my_netid1', 'my_netid2']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets2})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': self.port_data2})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 5}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 2)
+ self.assertEqual(max_count, 1)
+
+ def test_validate_networks_unlimited_quota(self):
+ # Test validation for a request for two instances needing
+ # two ports each, where the quota is -1 (unlimited)
+ # => instances which can be created = 2 (the requested count)
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='my_netid1'),
+ objects.NetworkRequest(network_id='my_netid2')])
+ ids = ['my_netid1', 'my_netid2']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets2})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': self.port_data2})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': -1}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 2)
+ self.assertEqual(max_count, 2)
+
+ def test_validate_networks_no_quota_but_ports_supplied(self):
+ port_a = self.port_data3[0]
+ port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+ 'subnet_id': 'subnet_id'}
+ port_b = self.port_data2[1]
+ self.assertNotEqual(port_a['network_id'], port_b['network_id'])
+ for port in [port_a, port_b]:
+ port['device_id'] = None
+ port['device_owner'] = None
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port_a['id']),
+ objects.NetworkRequest(port_id=port_b['id'])])
+ self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ max_count = api.validate_networks(self.context,
+ requested_networks, 1)
+ self.assertEqual(max_count, 1)
+
+ def _mock_list_ports(self, port_data=None):
+ if port_data is None:
+ port_data = self.port_data2
+ address = self.port_address
+ self.moxed_client.list_ports(
+ fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
+ {'ports': port_data})
+ self.mox.ReplayAll()
+ return address
+
+ def test_get_instance_uuids_by_ip_filter(self):
+ self._mock_list_ports()
+ filters = {'ip': '^10\\.0\\.1\\.2$'}
+ api = neutronapi.API()
+ result = api.get_instance_uuids_by_ip_filter(self.context, filters)
+ self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
+ self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
+
+ def test_get_fixed_ip_by_address_fails_for_no_ports(self):
+ address = self._mock_list_ports(port_data=[])
+ api = neutronapi.API()
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ api.get_fixed_ip_by_address,
+ self.context, address)
+
+ def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
+ address = self._mock_list_ports(port_data=self.port_data1)
+ api = neutronapi.API()
+ result = api.get_fixed_ip_by_address(self.context, address)
+ self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
+
+ def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
+ address = self._mock_list_ports()
+ api = neutronapi.API()
+ self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
+ api.get_fixed_ip_by_address,
+ self.context, address)
+
+ def _get_available_networks(self, prv_nets, pub_nets,
+ req_ids=None, context=None):
+ api = neutronapi.API()
+ nets = prv_nets + pub_nets
+ if req_ids:
+ mox_list_params = {'id': req_ids}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ else:
+ mox_list_params = {'tenant_id': self.instance['project_id'],
+ 'shared': False}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': prv_nets})
+ mox_list_params = {'shared': True}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': pub_nets})
+
+ self.mox.ReplayAll()
+ rets = api._get_available_networks(
+ context if context else self.context,
+ self.instance['project_id'],
+ req_ids)
+ self.assertEqual(rets, nets)
+
+ def test_get_available_networks_all_private(self):
+ self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
+
+ def test_get_available_networks_all_public(self):
+ self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
+
+ def test_get_available_networks_private_and_public(self):
+ self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
+
+ def test_get_available_networks_with_network_ids(self):
+ prv_nets = [self.nets3[0]]
+ pub_nets = [self.nets3[-1]]
+ # specify only first and last network
+ req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
+ self._get_available_networks(prv_nets, pub_nets, req_ids)
+
+ def test_get_available_networks_with_custom_policy(self):
+ rules = {'network:attach_external_network':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req_ids = [net['id'] for net in self.nets5]
+ self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
+
+ def test_get_floating_ip_pools(self):
+ api = neutronapi.API()
+ search_opts = {'router:external': True}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
+ self.mox.ReplayAll()
+ pools = api.get_floating_ip_pools(self.context)
+ expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
+ self.assertEqual(expected, pools)
+
+ def _get_expected_fip_model(self, fip_data, idx=0):
+ expected = {'id': fip_data['id'],
+ 'address': fip_data['floating_ip_address'],
+ 'pool': self.fip_pool['name'],
+ 'project_id': fip_data['tenant_id'],
+ 'fixed_ip_id': fip_data['port_id'],
+ 'fixed_ip':
+ {'address': fip_data['fixed_ip_address']},
+ 'instance': ({'uuid': self.port_data2[idx]['device_id']}
+ if fip_data['port_id']
+ else None)}
+ return expected
+
+ def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
+ api = neutronapi.API()
+ fip_id = fip_data['id']
+ net_id = fip_data['floating_network_id']
+ address = fip_data['floating_ip_address']
+ if by_address:
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [fip_data]})
+ else:
+ self.moxed_client.show_floatingip(fip_id).\
+ AndReturn({'floatingip': fip_data})
+ self.moxed_client.show_network(net_id).\
+ AndReturn({'network': self.fip_pool})
+ if fip_data['port_id']:
+ self.moxed_client.show_port(fip_data['port_id']).\
+ AndReturn({'port': self.port_data2[idx]})
+ self.mox.ReplayAll()
+
+ expected = self._get_expected_fip_model(fip_data, idx)
+
+ if by_address:
+ fip = api.get_floating_ip_by_address(self.context, address)
+ else:
+ fip = api.get_floating_ip(self.context, fip_id)
+ self.assertEqual(expected, fip)
+
+ def test_get_floating_ip_unassociated(self):
+ self._test_get_floating_ip(self.fip_unassociated, idx=0)
+
+ def test_get_floating_ip_associated(self):
+ self._test_get_floating_ip(self.fip_associated, idx=1)
+
+ def test_get_floating_ip_by_address(self):
+ self._test_get_floating_ip(self.fip_unassociated, idx=0,
+ by_address=True)
+
+ def test_get_floating_ip_by_address_associated(self):
+ self._test_get_floating_ip(self.fip_associated, idx=1,
+ by_address=True)
+
+ def test_get_floating_ip_by_address_not_found(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': []})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ api.get_floating_ip_by_address,
+ self.context, address)
+
+ def test_get_floating_ip_by_id_not_found(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+ floating_ip_id = self.fip_unassociated['id']
+ self.moxed_client.show_floatingip(floating_ip_id).\
+ AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpNotFound,
+ api.get_floating_ip,
+ self.context, floating_ip_id)
+
+ def test_get_floating_ip_raises_non404(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(status_code=0)
+ floating_ip_id = self.fip_unassociated['id']
+ self.moxed_client.show_floatingip(floating_ip_id).\
+ AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ self.assertRaises(exceptions.NeutronClientException,
+ api.get_floating_ip,
+ self.context, floating_ip_id)
+
+ def test_get_floating_ip_by_address_multiple_found(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated] * 2})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
+ api.get_floating_ip_by_address,
+ self.context, address)
+
+ def test_get_floating_ips_by_project(self):
+ api = neutronapi.API()
+ project_id = self.context.project_id
+ self.moxed_client.list_floatingips(tenant_id=project_id).\
+ AndReturn({'floatingips': [self.fip_unassociated,
+ self.fip_associated]})
+ search_opts = {'router:external': True}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
+ self.moxed_client.list_ports(tenant_id=project_id).\
+ AndReturn({'ports': self.port_data2})
+ self.mox.ReplayAll()
+
+ expected = [self._get_expected_fip_model(self.fip_unassociated),
+ self._get_expected_fip_model(self.fip_associated, idx=1)]
+ fips = api.get_floating_ips_by_project(self.context)
+ self.assertEqual(expected, fips)
+
+ def _test_get_instance_id_by_floating_address(self, fip_data,
+ associated=False):
+ api = neutronapi.API()
+ address = fip_data['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [fip_data]})
+ if associated:
+ self.moxed_client.show_port(fip_data['port_id']).\
+ AndReturn({'port': self.port_data2[1]})
+ self.mox.ReplayAll()
+
+ if associated:
+ expected = self.port_data2[1]['device_id']
+ else:
+ expected = None
+ fip = api.get_instance_id_by_floating_address(self.context, address)
+ self.assertEqual(expected, fip)
+
+ def test_get_instance_id_by_floating_address(self):
+ self._test_get_instance_id_by_floating_address(self.fip_unassociated)
+
+ def test_get_instance_id_by_floating_address_associated(self):
+ self._test_get_instance_id_by_floating_address(self.fip_associated,
+ associated=True)
+
+ def test_allocate_floating_ip(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context, 'ext_net')
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_allocate_floating_ip_addr_gen_fail(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndRaise(exceptions.IpAddressGenerationFailureClient)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoMoreFloatingIps,
+ api.allocate_floating_ip, self.context, 'ext_net')
+
+ def test_allocate_floating_ip_exhausted_fail(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndRaise(exceptions.ExternalIpAddressExhaustedClient)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoMoreFloatingIps,
+ api.allocate_floating_ip, self.context, 'ext_net')
+
+ def test_allocate_floating_ip_with_pool_id(self):
+ api = neutronapi.API()
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'id': pool_id}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context, pool_id)
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_allocate_floating_ip_with_default_pool(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool_nova['name']
+ pool_id = self.fip_pool_nova['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool_nova]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context)
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_release_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fip_id = self.fip_unassociated['id']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.delete_floatingip(fip_id)
+ self.mox.ReplayAll()
+ api.release_floating_ip(self.context, address)
+
+ def test_disassociate_and_release_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fip_id = self.fip_unassociated['id']
+ floating_ip = {'address': address}
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.delete_floatingip(fip_id)
+ self.mox.ReplayAll()
+ api.disassociate_and_release_floating_ip(self.context, None,
+ floating_ip)
+
+ def test_release_floating_ip_associated(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpAssociated,
+ api.release_floating_ip, self.context, address)
+
+ def _setup_mock_for_refresh_cache(self, api, instances):
+ nw_info = self.mox.CreateMock(model.NetworkInfo)
+ self.mox.StubOutWithMock(api, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ for instance in instances:
+ nw_info.json()
+ api._get_instance_nw_info(mox.IgnoreArg(), instance).\
+ AndReturn(nw_info)
+ api.db.instance_info_cache_update(mox.IgnoreArg(),
+ instance['uuid'],
+ mox.IgnoreArg())
+
+ def test_associate_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fixed_address = self.port_address2
+ fip_id = self.fip_unassociated['id']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[1]]})
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
+ 'fixed_ip_address': fixed_address}})
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+
+ self.mox.ReplayAll()
+ api.associate_floating_ip(self.context, self.instance,
+ address, fixed_address)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_reassociate_floating_ip(self, mock_get):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ new_fixed_address = self.port_address
+ fip_id = self.fip_associated['id']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance2['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[0]]})
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': 'my_portid1',
+ 'fixed_ip_address': new_fixed_address}})
+ self.moxed_client.show_port(self.fip_associated['port_id']).\
+ AndReturn({'port': self.port_data2[1]})
+
+ mock_get.return_value = fake_instance.fake_instance_obj(
+ self.context, **self.instance)
+ self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
+ self.instance2])
+
+ self.mox.ReplayAll()
+ api.associate_floating_ip(self.context, self.instance2,
+ address, new_fixed_address)
+
+ def test_associate_floating_ip_not_found_fixed_ip(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ fixed_address = self.fip_associated['fixed_ip_address']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[0]]})
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ api.associate_floating_ip, self.context,
+ self.instance, address, fixed_address)
+
+ def test_disassociate_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ fip_id = self.fip_associated['id']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': None}})
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+
+ self.mox.ReplayAll()
+ api.disassociate_floating_ip(self.context, self.instance, address)
+
+ def test_add_fixed_ip_to_instance(self):
+ api = neutronapi.API()
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+ network_id = 'my_netid1'
+ search_opts = {'network_id': network_id}
+ self.moxed_client.list_subnets(
+ **search_opts).AndReturn({'subnets': self.subnet_data_n})
+
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': 'compute:nova',
+ 'network_id': network_id}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [{'subnet_id': 'my_subid1'},
+ {'subnet_id': 'my_subid1'}],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
+
+ def test_remove_fixed_ip_from_instance(self):
+ api = neutronapi.API()
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+ address = '10.0.0.3'
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = []
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.remove_fixed_ip_from_instance(self.context, self.instance, address)
+
+ def test_list_floating_ips_without_l3_support(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(
+ status_code=404)
+ self.moxed_client.list_floatingips(
+ fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ floatingips = api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', 1)
+ self.assertEqual(floatingips, [])
+
+ def test_nw_info_get_ips(self):
+ fake_port = {
+ 'fixed_ips': [
+ {'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ }
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
+ api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
+ [{'floating_ip_address': '10.0.0.1'}])
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ result = api._nw_info_get_ips(self.moxed_client, fake_port)
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0]['address'], '1.1.1.1')
+ self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
+
+ def test_nw_info_get_subnets(self):
+ fake_port = {
+ 'fixed_ips': [
+ {'ip_address': '1.1.1.1'},
+ {'ip_address': '2.2.2.2'}],
+ 'id': 'port-id',
+ }
+ fake_subnet = model.Subnet(cidr='1.0.0.0/8')
+ fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_get_subnets_from_port')
+ api._get_subnets_from_port(self.context, fake_port).AndReturn(
+ [fake_subnet])
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
+ self.assertEqual(len(subnets), 1)
+ self.assertEqual(len(subnets[0]['ips']), 1)
+ self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
+
+ def _test_nw_info_build_network(self, vif_type):
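+        """Build network info for a port with the given VIF type and check
+        the fields common to all VIF types; callers verify the type-specific
+        fields.
+        """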
+ fake_port = {
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ 'network_id': 'net-id',
+ 'binding:vif_type': vif_type,
+ }
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
+ api = neutronapi.API()
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ net, iid = api._nw_info_build_network(fake_port, fake_nets,
+ fake_subnets)
+ self.assertEqual(net['subnets'], fake_subnets)
+ self.assertEqual(net['id'], 'net-id')
+ self.assertEqual(net['label'], 'foo')
+ self.assertEqual(net.get_meta('tenant_id'), 'tenant')
+ self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
+ return net, iid
+
+ def test_nw_info_build_network_ovs(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
+ self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
+ self.assertNotIn('should_create_bridge', net)
+ self.assertEqual(iid, 'port-id')
+
+ def test_nw_info_build_network_dvs(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
+ self.assertEqual('foo-net-id', net['bridge'])
+ self.assertNotIn('should_create_bridge', net)
+ self.assertNotIn('ovs_interfaceid', net)
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_network_bridge(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
+ self.assertEqual(net['bridge'], 'brqnet-id')
+ self.assertTrue(net['should_create_bridge'])
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_network_other(self):
+ net, iid = self._test_nw_info_build_network(None)
+ self.assertIsNone(net['bridge'])
+ self.assertNotIn('should_create_bridge', net)
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_no_match(self):
+ fake_port = {
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ 'network_id': 'net-id1',
+ 'tenant_id': 'tenant',
+ 'binding:vif_type': model.VIF_TYPE_OVS,
+ }
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
+ api = neutronapi.API()
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ net, iid = api._nw_info_build_network(fake_port, fake_nets,
+ fake_subnets)
+ self.assertEqual(fake_subnets, net['subnets'])
+        self.assertEqual('net-id1', net['id'])
+ self.assertEqual('tenant', net['meta']['tenant_id'])
+
+ def test_build_network_info_model(self):
+ api = neutronapi.API()
+ fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
+ 'info_cache': {'network_info': []}}
+ fake_ports = [
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port1',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:01',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=False and status='DOWN' thus vif.active=True
+ {'id': 'port2',
+ 'network_id': 'net-id',
+ 'admin_state_up': False,
+ 'status': 'DOWN',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:02',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=True and status='DOWN' thus vif.active=False
+ {'id': 'port0',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'DOWN',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:03',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port3',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:04',
+ 'binding:vif_type': model.VIF_TYPE_HW_VEB,
+ 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
+ 'binding:profile': {'pci_vendor_info': '1137:0047',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'},
+ 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port4',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:05',
+ 'binding:vif_type': model.VIF_TYPE_802_QBH,
+ 'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
+ 'binding:profile': {'pci_vendor_info': '1137:0047',
+ 'pci_slot': '0000:0a:00.2',
+ 'physical_network': 'phynet1'},
+ 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+            # This port has no binding:vnic_type, to verify that the
+            # default is assumed
+ {'id': 'port5',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:06',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ # No binding:vnic_type
+ 'binding:vif_details': {},
+ },
+ # This does not match the networks we provide below,
+ # so it should be ignored (and is here to verify that)
+ {'id': 'port6',
+ 'network_id': 'other-net-id',
+ 'admin_state_up': True,
+ 'status': 'DOWN',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ },
+ ]
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [
+ {'id': 'net-id',
+ 'name': 'foo',
+ 'tenant_id': 'fake',
+ }
+ ]
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
+ ).AndReturn(self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id='fake', device_id='uuid').AndReturn(
+ {'ports': fake_ports})
+
+ self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
+ self.mox.StubOutWithMock(api, '_get_subnets_from_port')
+ requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
+ fake_ports[3], fake_ports[4], fake_ports[5]]
+ for requested_port in requested_ports:
+ api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
+ [{'floating_ip_address': '10.0.0.1'}])
+ for requested_port in requested_ports:
+ api._get_subnets_from_port(self.context, requested_port
+ ).AndReturn(fake_subnets)
+
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ nw_infos = api._build_network_info_model(self.context, fake_inst,
+ fake_nets,
+ [fake_ports[2]['id'],
+ fake_ports[0]['id'],
+ fake_ports[1]['id'],
+ fake_ports[3]['id'],
+ fake_ports[4]['id'],
+ fake_ports[5]['id']])
+ self.assertEqual(len(nw_infos), 6)
+ index = 0
+ for nw_info in nw_infos:
+ self.assertEqual(nw_info['address'],
+ requested_ports[index]['mac_address'])
+ self.assertEqual(nw_info['devname'], 'tapport' + str(index))
+ self.assertIsNone(nw_info['ovs_interfaceid'])
+ self.assertEqual(nw_info['type'],
+ requested_ports[index]['binding:vif_type'])
+ if nw_info['type'] == model.VIF_TYPE_BRIDGE:
+ self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
+ self.assertEqual(nw_info['vnic_type'],
+ requested_ports[index].get('binding:vnic_type',
+ model.VNIC_TYPE_NORMAL))
+ self.assertEqual(nw_info.get('details'),
+ requested_ports[index].get('binding:vif_details'))
+ self.assertEqual(nw_info.get('profile'),
+ requested_ports[index].get('binding:profile'))
+ index += 1
+
+ self.assertEqual(nw_infos[0]['active'], False)
+ self.assertEqual(nw_infos[1]['active'], True)
+ self.assertEqual(nw_infos[2]['active'], True)
+ self.assertEqual(nw_infos[3]['active'], True)
+ self.assertEqual(nw_infos[4]['active'], True)
+ self.assertEqual(nw_infos[5]['active'], True)
+
+ self.assertEqual(nw_infos[0]['id'], 'port0')
+ self.assertEqual(nw_infos[1]['id'], 'port1')
+ self.assertEqual(nw_infos[2]['id'], 'port2')
+ self.assertEqual(nw_infos[3]['id'], 'port3')
+ self.assertEqual(nw_infos[4]['id'], 'port4')
+ self.assertEqual(nw_infos[5]['id'], 'port5')
+
+ def test_get_subnets_from_port(self):
+ api = neutronapi.API()
+
+ port_data = copy.copy(self.port_data1[0])
+ subnet_data1 = copy.copy(self.subnet_data1)
+ subnet_data1[0]['host_routes'] = [
+ {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
+ ]
+
+ self.moxed_client.list_subnets(
+ id=[port_data['fixed_ips'][0]['subnet_id']]
+ ).AndReturn({'subnets': subnet_data1})
+ self.moxed_client.list_ports(
+ network_id=subnet_data1[0]['network_id'],
+ device_owner='network:dhcp').AndReturn({'ports': []})
+ self.mox.ReplayAll()
+
+ subnets = api._get_subnets_from_port(self.context, port_data)
+
+ self.assertEqual(len(subnets), 1)
+ self.assertEqual(len(subnets[0]['routes']), 1)
+ self.assertEqual(subnets[0]['routes'][0]['cidr'],
+ subnet_data1[0]['host_routes'][0]['destination'])
+ self.assertEqual(subnets[0]['routes'][0]['gateway']['address'],
+ subnet_data1[0]['host_routes'][0]['nexthop'])
+
+ def test_get_all_empty_list_networks(self):
+ api = neutronapi.API()
+ self.moxed_client.list_networks().AndReturn({'networks': []})
+ self.mox.ReplayAll()
+ networks = api.get_all(self.context)
+ self.assertEqual(networks, [])
+
+ def test_get_floating_ips_by_fixed_address(self):
+ # NOTE(lbragstad): We need to reset the mocks in order to assert
+ # a NotImplementedError is raised when calling the method under test.
+ self.mox.ResetAll()
+ fake_fixed = '192.168.1.4'
+ api = neutronapi.API()
+ self.assertRaises(NotImplementedError,
+ api.get_floating_ips_by_fixed_address,
+ self.context, fake_fixed)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_1(self, mock_get_client):
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ test_port = {
+ 'port': {'id': 'my_port_id1',
+ 'network_id': 'net-id',
+ 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
+ },
+ }
+ test_net = {'network': {'provider:physical_network': 'phynet1'}}
+
+ mock_client = mock_get_client()
+ mock_client.show_port.return_value = test_port
+ mock_client.show_network.return_value = test_net
+ vnic_type, phynet_name = api._get_port_vnic_info(
+ self.context, mock_client, test_port['port']['id'])
+
+ mock_client.show_port.assert_called_once_with(test_port['port']['id'],
+ fields=['binding:vnic_type', 'network_id'])
+ mock_client.show_network.assert_called_once_with(
+ test_port['port']['network_id'],
+ fields='provider:physical_network')
+ self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
+ self.assertEqual(phynet_name, 'phynet1')
+
+ def _test_get_port_vnic_info(self, mock_get_client,
+ binding_vnic_type=None):
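+        """Verify that a port with no (or a 'normal') binding:vnic_type
+        resolves to VNIC_TYPE_NORMAL with no physical network name.
+        """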
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ test_port = {
+ 'port': {'id': 'my_port_id2',
+ 'network_id': 'net-id',
+ },
+ }
+
+ if binding_vnic_type:
+ test_port['port']['binding:vnic_type'] = binding_vnic_type
+
+ mock_client = mock_get_client()
+ mock_client.show_port.return_value = test_port
+ vnic_type, phynet_name = api._get_port_vnic_info(
+ self.context, mock_client, test_port['port']['id'])
+
+ mock_client.show_port.assert_called_once_with(test_port['port']['id'],
+ fields=['binding:vnic_type', 'network_id'])
+ self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
+ self.assertFalse(phynet_name)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_2(self, mock_get_client):
+ self._test_get_port_vnic_info(mock_get_client,
+ binding_vnic_type=model.VNIC_TYPE_NORMAL)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_3(self, mock_get_client):
+ self._test_get_port_vnic_info(mock_get_client)
+
+ @mock.patch.object(neutronapi.API, "_get_port_vnic_info")
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
+ mock_get_port_vnic_info):
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ requested_networks = objects.NetworkRequestList(
+            objects=[
+ objects.NetworkRequest(port_id='my_portid1'),
+ objects.NetworkRequest(network_id='net1'),
+ objects.NetworkRequest(port_id='my_portid2'),
+ objects.NetworkRequest(port_id='my_portid3'),
+ objects.NetworkRequest(port_id='my_portid4')])
+ pci_requests = objects.InstancePCIRequests(requests=[])
+ mock_get_port_vnic_info.side_effect = [
+ (model.VNIC_TYPE_DIRECT, 'phynet1'),
+ (model.VNIC_TYPE_NORMAL, ''),
+ (model.VNIC_TYPE_MACVTAP, 'phynet1'),
+ (model.VNIC_TYPE_MACVTAP, 'phynet2')
+ ]
+ api.create_pci_requests_for_sriov_ports(
+ None, pci_requests, requested_networks)
+ self.assertEqual(3, len(pci_requests.requests))
+ has_pci_request_id = [net.pci_request_id is not None for net in
+ requested_networks.objects]
+ expected_results = [True, False, False, True, True]
+ self.assertEqual(expected_results, has_pci_request_id)
+
+
+class TestNeutronv2WithMock(test.TestCase):
+ """Used to test Neutron V2 API with mock."""
+
+ def setUp(self):
+ super(TestNeutronv2WithMock, self).setUp()
+ self.api = neutronapi.API()
+ self.context = context.RequestContext(
+ 'fake-user', 'fake-project',
+ auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
+
+ @mock.patch('oslo.concurrency.lockutils.lock')
+ def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
+ instance = objects.Instance(uuid=uuid.uuid4())
+ api = neutronapi.API()
+ mock_lock.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ api.get_instance_nw_info, 'context', instance)
+ mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
+
+ def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
+ ids, list_port_values):
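+        """Validate the requested networks while faking the Neutron port,
+        network and quota lookups, and verify the expected client calls.
+        """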
+
+ def _fake_list_ports(**search_opts):
+ for args, return_value in list_port_values:
+ if args == search_opts:
+ return return_value
+ self.fail('Unexpected call to list_ports %s' % search_opts)
+
+ with contextlib.nested(
+ mock.patch.object(client.Client, 'list_ports',
+ side_effect=_fake_list_ports),
+ mock.patch.object(client.Client, 'list_networks',
+ return_value={'networks': nets}),
+ mock.patch.object(client.Client, 'show_quota',
+ return_value={'quota': {'port': 50}})) as (
+ list_ports_mock, list_networks_mock, show_quota_mock):
+
+ self.api.validate_networks(self.context, requested_networks, 1)
+
+ self.assertEqual(len(list_port_values),
+ len(list_ports_mock.call_args_list))
+ list_networks_mock.assert_called_once_with(id=ids)
+ show_quota_mock.assert_called_once_with(tenant_id='fake-project')
+
+ def test_validate_networks_fixed_ip_no_dup1(self):
+        # Test validation of a request for a network with a fixed IP
+        # that is not already in use because no fixed IPs are in use.
+
+ nets1 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'fake-project'}]
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None)]
+ ids = ['my_netid1']
+ list_port_values = [({'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'},
+ {'ports': []}),
+ ({'tenant_id': 'fake-project'},
+ {'ports': []})]
+ self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
+ ids, list_port_values)
+
+ def test_validate_networks_fixed_ip_no_dup2(self):
+        # Test validation of a request for a network with a fixed IP
+        # that is not already in use because it is not used on this network.
+
+ nets2 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'fake-project'},
+ {'id': 'my_netid2',
+ 'name': 'my_netname2',
+ 'subnets': ['mysubnid2'],
+ 'tenant_id': 'fake-project'}]
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None),
+ ('my_netid2', '10.0.1.3', None, None)]
+ ids = ['my_netid1', 'my_netid2']
+ list_port_values = [({'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'},
+ {'ports': []}),
+ ({'network_id': 'my_netid2',
+ 'fixed_ips': 'ip_address=10.0.1.3',
+ 'fields': 'device_id'},
+ {'ports': []}),
+
+ ({'tenant_id': 'fake-project'},
+ {'ports': []})]
+
+ self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
+ ids, list_port_values)
+
+ def test_validate_networks_fixed_ip_dup(self):
+        # Test validation of a request for a network with a fixed IP
+        # that is already in use.
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None)]
+ list_port_mock_params = {'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'}
+ list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}
+
+ with mock.patch.object(client.Client, 'list_ports',
+ return_value=list_port_mock_return) as (
+ list_ports_mock):
+
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ self.api.validate_networks,
+ self.context, requested_networks, 1)
+
+ list_ports_mock.assert_called_once_with(**list_port_mock_params)
+
+ def test_allocate_floating_ip_exceed_limit(self):
+        # Verify that the correct exception is raised when the quota is
+        # exceeded.
+ pool_name = 'dummy'
+ api = neutronapi.API()
+ with contextlib.nested(
+ mock.patch.object(client.Client, 'create_floatingip'),
+ mock.patch.object(api,
+ '_get_floating_ip_pool_id_by_name_or_id')) as (
+ create_mock, get_mock):
+ create_mock.side_effect = exceptions.OverQuotaClient()
+
+ self.assertRaises(exception.FloatingIpLimitExceeded,
+ api.allocate_floating_ip,
+ self.context, pool_name)
+
+ def test_create_port_for_instance_no_more_ip(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+
+ with mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.IpAddressGenerationFailureClient()) as (
+ create_port_mock):
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone}}
+ self.assertRaises(exception.NoMoreFixedIps,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body)
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ @mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.MacAddressInUseClient())
+ def test_create_port_for_instance_mac_address_in_use(self,
+ create_port_mock):
+ # Create fake data.
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
+ available_macs = set(['XX:XX:XX:XX:XX:XX'])
+ # Run the code.
+ self.assertRaises(exception.PortInUse,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body,
+ available_macs=available_macs)
+ # Assert the calls.
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ @mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.IpAddressInUseClient())
+ def test_create_port_for_fixed_ip_in_use(self, create_port_mock):
+ # Create fake data.
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
+ fake_ip = '1.1.1.1'
+ # Run the code.
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body,
+ fixed_ip=fake_ip)
+ # Assert the calls.
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ def test_get_network_detail_not_found(self):
+ api = neutronapi.API()
+ expected_exc = exceptions.NetworkNotFoundClient()
+ network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
+ with mock.patch.object(client.Client, 'show_network',
+ side_effect=expected_exc) as (
+ fake_show_network):
+ self.assertRaises(exception.NetworkNotFound,
+ api.get,
+ self.context,
+ network_uuid)
+ fake_show_network.assert_called_once_with(network_uuid)
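+        """Build the floating IP model dict that the API is expected to
+        return for the given Neutron floating IP data.
+        """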
+
+ def test_deallocate_for_instance_uses_delete_helper(self):
+ # setup fake data
+ instance = fake_instance.fake_instance_obj(self.context)
+ port_data = {'ports': [{'id': str(uuid.uuid4())}]}
+ ports = set([port['id'] for port in port_data.get('ports')])
+ api = neutronapi.API()
+ # setup mocks
+ mock_client = mock.Mock()
+ mock_client.list_ports.return_value = port_data
+ with contextlib.nested(
+ mock.patch.object(neutronv2, 'get_client',
+ return_value=mock_client),
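+        """Mock the Neutron floating IP, network and (if associated) port
+        lookups and verify the floating IP model returned by the API.
+        """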
+ mock.patch.object(api, '_delete_ports')
+ ) as (
+ mock_get_client, mock_delete
+ ):
+ # run the code
+ api.deallocate_for_instance(self.context, instance)
+ # assert the calls
+ mock_client.list_ports.assert_called_once_with(
+ device_id=instance.uuid)
+ mock_delete.assert_called_once_with(
+ mock_client, instance, ports, raise_if_fail=True)
+
+ def _test_delete_ports(self, expect_raise):
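+        """Delete two ports where the first delete fails; the failure is
+        only propagated when raise_if_fail is set.
+        """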
+ results = [exceptions.NeutronClientException, None]
+ mock_client = mock.Mock()
+ with mock.patch.object(mock_client, 'delete_port',
+ side_effect=results):
+ api = neutronapi.API()
+ api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
+ raise_if_fail=expect_raise)
+
+ def test_delete_ports_raise(self):
+ self.assertRaises(exceptions.NeutronClientException,
+ self._test_delete_ports, True)
+
+ def test_delete_ports_no_raise(self):
+ self._test_delete_ports(False)
+
+ def test_delete_ports_never_raise_404(self):
+ mock_client = mock.Mock()
+ mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
+ api = neutronapi.API()
+ api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
+ raise_if_fail=True)
+ mock_client.delete_port.assert_called_once_with('port1')
+
+ def test_deallocate_port_for_instance_fails(self):
+ mock_client = mock.Mock()
+ api = neutronapi.API()
+ with contextlib.nested(
+ mock.patch.object(neutronv2, 'get_client',
+ return_value=mock_client),
+ mock.patch.object(api, '_delete_ports',
+ side_effect=exceptions.Unauthorized),
+ mock.patch.object(api, 'get_instance_nw_info')
+ ) as (
+ get_client, delete_ports, get_nw_info
+ ):
+ self.assertRaises(exceptions.Unauthorized,
+ api.deallocate_port_for_instance,
+ self.context, instance={'uuid': 'fake'},
+ port_id='fake')
+ # make sure that we didn't try to reload nw info
+ self.assertFalse(get_nw_info.called)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def _test_show_port_exceptions(self, client_exc, expected_nova_exc,
+ get_client_mock):
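+        """Make show_port raise the given Neutron client exception and
+        check that it is translated into the expected Nova exception.
+        """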
+ show_port_mock = mock.Mock(side_effect=client_exc)
+ get_client_mock.return_value.show_port = show_port_mock
+ self.assertRaises(expected_nova_exc, self.api.show_port,
+ self.context, 'fake_port_id')
+
+ def test_show_port_not_found(self):
+ self._test_show_port_exceptions(exceptions.PortNotFoundClient,
+ exception.PortNotFound)
+
+ def test_show_port_forbidden(self):
+ self._test_show_port_exceptions(exceptions.Unauthorized,
+ exception.Forbidden)
+
+
+class TestNeutronv2ModuleMethods(test.TestCase):
+
+ def test_gather_port_ids_and_networks_wrong_params(self):
+ api = neutronapi.API()
+
+ # Test with networks not None and port_ids is None
+ self.assertRaises(exception.NovaException,
+ api._gather_port_ids_and_networks,
+ 'fake_context', 'fake_instance',
+ [{'network': {'name': 'foo'}}], None)
+
+ # Test with networks is None and port_ids not None
+ self.assertRaises(exception.NovaException,
+ api._gather_port_ids_and_networks,
+ 'fake_context', 'fake_instance',
+ None, ['list', 'of', 'port_ids'])
+
+ def test_ensure_requested_network_ordering_no_preference_ids(self):
+ l = [1, 2, 3]
+
+ neutronapi._ensure_requested_network_ordering(
+ lambda x: x,
+ l,
+ None)
+
+ def test_ensure_requested_network_ordering_no_preference_hashes(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ neutronapi._ensure_requested_network_ordering(
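+        """Resolve the instance id for a floating IP address; None is
+        expected when the floating IP is not associated with a port.
+        """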
+ lambda x: x['id'],
+ l,
+ None)
+
+ self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
+
+ def test_ensure_requested_network_ordering_with_preference(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ neutronapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ [1, 2, 3])
+
+ self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
+
+
+class TestNeutronv2Portbinding(TestNeutronv2Base):
+
+ def test_allocate_for_instance_portbinding(self):
+ self._allocate_for_instance(1, portbinding=True)
+
+ def test_populate_neutron_extension_values_binding(self):
+ api = neutronapi.API()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.PORTBINDING_EXT}]})
+ self.mox.ReplayAll()
+ host_id = 'my_host_id'
+ instance = {'host': host_id}
+ port_req_body = {'port': {}}
+ api._populate_neutron_extension_values(self.context, instance,
+ None, port_req_body)
+ self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
+ self.assertFalse(port_req_body['port'].get('binding:profile'))
+
+ @mock.patch.object(pci_whitelist, 'get_pci_device_devspec')
+ @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ def test_populate_neutron_extension_values_binding_sriov(self,
+ mock_get_instance_pci_devs,
+ mock_get_pci_device_devspec):
+ api = neutronapi.API()
+ host_id = 'my_host_id'
+ instance = {'host': host_id}
+ port_req_body = {'port': {}}
+ pci_req_id = 'my_req_id'
+ pci_dev = {'vendor_id': '1377',
+ 'product_id': '0047',
+ 'address': '0000:0a:00.1',
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address'])
+ mydev = PciDevice(**pci_dev)
+ profile = {'pci_vendor_info': '1377:0047',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1',
+ }
+
+ mock_get_instance_pci_devs.return_value = [mydev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'phynet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+ api._populate_neutron_binding_profile(instance,
+ pci_req_id, port_req_body)
+
+ self.assertEqual(port_req_body['port']['binding:profile'], profile)
+
+ def test_migrate_instance_finish_binding_false(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(False)
+ self.mox.ReplayAll()
+ api.migrate_instance_finish(self.context, None, None)
+
+ def test_migrate_instance_finish_binding_true(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(True)
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ search_opts = {'device_id': self.instance['uuid'],
+ 'tenant_id': self.instance['project_id']}
+ ports = {'ports': [{'id': 'test1'}]}
+ self.moxed_client.list_ports(**search_opts).AndReturn(ports)
+ migration = {'source_compute': self.instance.get('host'),
+ 'dest_compute': 'dest_host', }
+ port_req_body = {'port':
+ {'binding:host_id': migration['dest_compute']}}
+ self.moxed_client.update_port('test1',
+ port_req_body).AndReturn(None)
+ self.mox.ReplayAll()
+ api.migrate_instance_finish(self.context, self.instance, migration)
+
+ def test_migrate_instance_finish_binding_true_exception(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(True)
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ search_opts = {'device_id': self.instance['uuid'],
+ 'tenant_id': self.instance['project_id']}
+ ports = {'ports': [{'id': 'test1'}]}
+ self.moxed_client.list_ports(**search_opts).AndReturn(ports)
+ migration = {'source_compute': self.instance.get('host'),
+ 'dest_compute': 'dest_host', }
+ port_req_body = {'port':
+ {'binding:host_id': migration['dest_compute']}}
+ self.moxed_client.update_port('test1',
+ port_req_body).AndRaise(
+ Exception("fail to update port"))
+ self.mox.ReplayAll()
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ api.migrate_instance_finish,
+ self.context, self.instance, migration)
+
+ def test_associate_not_implemented(self):
+ api = neutronapi.API()
+ self.assertRaises(NotImplementedError,
+ api.associate,
+ self.context, 'id')
+
+
+class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
+ def setUp(self):
+ super(TestNeutronv2ExtraDhcpOpts, self).setUp()
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
+ self._allocate_for_instance(1, extra_dhcp_opts=False)
+
+ def test_allocate_for_instance_extradhcpopts(self):
+ dhcp_opts = [{'opt_name': 'bootfile-name',
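+        """Stub the network info retrieval and info cache update that are
+        expected when the network cache is refreshed for each instance.
+        """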
+ 'opt_value': 'pxelinux.0'},
+ {'opt_name': 'tftp-server',
+ 'opt_value': '123.123.123.123'},
+ {'opt_name': 'server-ip-address',
+ 'opt_value': '123.123.123.456'}]
+
+ self._allocate_for_instance(1, dhcp_options=dhcp_opts)
+
+
+class TestNeutronClientForAdminScenarios(test.TestCase):
+
+ def _test_get_client_for_admin(self, use_id=False, admin_context=False):
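+        """Verify the keyword arguments used to build the admin Neutron
+        client, optionally using tenant/user ids and an admin context.
+        """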
+
+ def client_mock(*args, **kwargs):
+ client.Client.httpclient = mock.MagicMock()
+
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ if use_id:
+ self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
+ self.flags(admin_user_id='admin_user_id', group='neutron')
+
+ if admin_context:
+ my_context = context.get_admin_context()
+ else:
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ kwargs = {
+ 'auth_url': CONF.neutron.admin_auth_url,
+ 'password': CONF.neutron.admin_password,
+ 'endpoint_url': CONF.neutron.url,
+ 'auth_strategy': None,
+ 'timeout': CONF.neutron.url_timeout,
+ 'insecure': False,
+ 'ca_cert': None,
+ 'token': None}
+ if use_id:
+ kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
+ kwargs['user_id'] = CONF.neutron.admin_user_id
+ else:
+ kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
+ kwargs['username'] = CONF.neutron.admin_username
+ client.Client.__init__(**kwargs).WithSideEffects(client_mock)
+ self.mox.ReplayAll()
+
+        # clean the cached global admin auth token
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = None
+ if admin_context:
+            # Note that the context does not contain a token but is an
+            # admin context, which will force an elevation to admin
+            # credentials.
+ neutronv2.get_client(my_context)
+ else:
+            # Note that the context is not elevated, but True is passed
+            # in, which will force an elevation to admin credentials even
+            # though the context has an auth_token.
+ neutronv2.get_client(my_context, True)
+
+ def test_get_client_for_admin(self):
+ self._test_get_client_for_admin()
+
+ def test_get_client_for_admin_with_id(self):
+ self._test_get_client_for_admin(use_id=True)
+
+ def test_get_client_for_admin_context(self):
+ self._test_get_client_for_admin(admin_context=True)
+
+ def test_get_client_for_admin_context_with_id(self):
+ self._test_get_client_for_admin(use_id=True, admin_context=True)
diff --git a/nova/tests/unit/network/test_rpcapi.py b/nova/tests/unit/network/test_rpcapi.py
new file mode 100644
index 0000000000..f24fdd02d2
--- /dev/null
+++ b/nova/tests/unit/network/test_rpcapi.py
@@ -0,0 +1,353 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.network.rpcapi
+"""
+
+import collections
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.network import rpcapi as network_rpcapi
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+
+
+class NetworkRpcAPITestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(NetworkRpcAPITestCase, self).setUp()
+ self.flags(multi_host=True)
+
+ # Used to specify the default value expected if no real value is passed
+ DefaultArg = collections.namedtuple('DefaultArg', ['value'])
+
+ def _test_network_api(self, method, rpc_method, **kwargs):
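+        """Invoke the named rpcapi method with the given arguments and
+        verify the expected version, fanout and server used to prepare the
+        client, as well as the arguments passed to the call or cast.
+        """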
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = network_rpcapi.NetworkAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.network_topic)
+
+ expected_retval = 'foo' if rpc_method == 'call' else None
+ expected_version = kwargs.pop('version', None)
+ expected_fanout = kwargs.pop('fanout', None)
+ expected_kwargs = kwargs.copy()
+
+ for k, v in expected_kwargs.items():
+ if isinstance(v, self.DefaultArg):
+ expected_kwargs[k] = v.value
+ kwargs.pop(k)
+
+ prepare_kwargs = {}
+ if expected_version:
+ prepare_kwargs['version'] = expected_version
+ if expected_fanout:
+ prepare_kwargs['fanout'] = True
+
+ if 'source_compute' in expected_kwargs:
+ # Fix up for migrate_instance_* calls.
+ expected_kwargs['source'] = expected_kwargs.pop('source_compute')
+ expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
+
+ targeted_methods = [
+ 'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
+ '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
+ '_associate_floating_ip', '_disassociate_floating_ip',
+ 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
+ 'migrate_instance_finish',
+ 'allocate_for_instance', 'deallocate_for_instance',
+ ]
+ targeted_by_instance = ['deallocate_for_instance']
+ if method in targeted_methods and ('host' in expected_kwargs or
+ 'instance' in expected_kwargs):
+ if method in targeted_by_instance:
+ host = expected_kwargs['instance']['host']
+ else:
+ host = expected_kwargs['host']
+ if method not in ['allocate_for_instance',
+ 'deallocate_fixed_ip']:
+ expected_kwargs.pop('host')
+ if CONF.multi_host:
+ prepare_kwargs['server'] = host
+
+ self.mox.StubOutWithMock(rpcapi, 'client')
+
+ version_check = [
+ 'deallocate_for_instance', 'deallocate_fixed_ip',
+ 'allocate_for_instance',
+ ]
+ if method in version_check:
+ rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
+
+ if prepare_kwargs:
+ rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
+
+ rpc_method = getattr(rpcapi.client, rpc_method)
+ rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
+
+ self.mox.ReplayAll()
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, expected_retval)
+
+ def test_create_networks(self):
+ self._test_network_api('create_networks', rpc_method='call',
+ arg1='arg', arg2='arg')
+
+ def test_delete_network(self):
+ self._test_network_api('delete_network', rpc_method='call',
+ uuid='fake_uuid', fixed_range='range')
+
+ def test_disassociate_network(self):
+ self._test_network_api('disassociate_network', rpc_method='call',
+ network_uuid='fake_uuid')
+
+ def test_associate_host_and_project(self):
+ self._test_network_api('associate', rpc_method='call',
+ network_uuid='fake_uuid',
+ associations={'host': "testHost",
+ 'project': 'testProject'},
+ version="1.5")
+
+ def test_get_fixed_ip(self):
+ self._test_network_api('get_fixed_ip', rpc_method='call', id='id')
+
+ def test_get_fixed_ip_by_address(self):
+ self._test_network_api('get_fixed_ip_by_address', rpc_method='call',
+ address='a.b.c.d')
+
+ def test_get_floating_ip(self):
+ self._test_network_api('get_floating_ip', rpc_method='call', id='id')
+
+ def test_get_floating_ip_pools(self):
+ self._test_network_api('get_floating_ip_pools', rpc_method='call',
+ version="1.7")
+
+ def test_get_floating_ip_by_address(self):
+ self._test_network_api('get_floating_ip_by_address', rpc_method='call',
+ address='a.b.c.d')
+
+ def test_get_floating_ips_by_project(self):
+ self._test_network_api('get_floating_ips_by_project',
+ rpc_method='call')
+
+ def test_get_floating_ips_by_fixed_address(self):
+ self._test_network_api('get_floating_ips_by_fixed_address',
+ rpc_method='call', fixed_address='w.x.y.z')
+
+ def test_get_instance_id_by_floating_address(self):
+ self._test_network_api('get_instance_id_by_floating_address',
+ rpc_method='call', address='w.x.y.z')
+
+ def test_allocate_floating_ip(self):
+ self._test_network_api('allocate_floating_ip', rpc_method='call',
+ project_id='fake_id', pool='fake_pool', auto_assigned=False)
+
+ def test_deallocate_floating_ip(self):
+ self._test_network_api('deallocate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_allocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('allocate_floating_ip', rpc_method='call',
+ project_id='fake_id', pool='fake_pool', auto_assigned=False)
+
+ def test_deallocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('deallocate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_associate_floating_ip(self):
+ self._test_network_api('associate_floating_ip', rpc_method='call',
+ floating_address='blah', fixed_address='foo',
+ affect_auto_assigned=True)
+
+ def test_disassociate_floating_ip(self):
+ self._test_network_api('disassociate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_allocate_for_instance(self):
+ self._test_network_api('allocate_for_instance', rpc_method='call',
+ instance_id='fake_id', project_id='fake_id', host='fake_host',
+ rxtx_factor='fake_factor', vpn=False, requested_networks={},
+ macs=[], version='1.13')
+
+ def test_deallocate_for_instance(self):
+ instance = fake_instance.fake_instance_obj(context.get_admin_context())
+ self._test_network_api('deallocate_for_instance', rpc_method='call',
+ requested_networks=self.DefaultArg(None), instance=instance,
+ version='1.11')
+
+ def test_deallocate_for_instance_with_expected_networks(self):
+ instance = fake_instance.fake_instance_obj(context.get_admin_context())
+ self._test_network_api('deallocate_for_instance', rpc_method='call',
+ instance=instance, requested_networks={}, version='1.11')
+
+ def test_add_fixed_ip_to_instance(self):
+ self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
+ instance_id='fake_id', rxtx_factor='fake_factor',
+ host='fake_host', network_id='fake_id', version='1.9')
+
+ def test_remove_fixed_ip_from_instance(self):
+ self._test_network_api('remove_fixed_ip_from_instance',
+ rpc_method='call', instance_id='fake_id',
+ rxtx_factor='fake_factor', host='fake_host',
+ address='fake_address', version='1.9')
+
+ def test_add_network_to_project(self):
+ self._test_network_api('add_network_to_project', rpc_method='call',
+ project_id='fake_id', network_uuid='fake_uuid')
+
+ def test_get_instance_nw_info(self):
+ self._test_network_api('get_instance_nw_info', rpc_method='call',
+ instance_id='fake_id', rxtx_factor='fake_factor',
+ host='fake_host', project_id='fake_id', version='1.9')
+
+ def test_validate_networks(self):
+ self._test_network_api('validate_networks', rpc_method='call',
+ networks={})
+
+ def test_get_instance_uuids_by_ip_filter(self):
+ self._test_network_api('get_instance_uuids_by_ip_filter',
+ rpc_method='call', filters={})
+
+ def test_get_dns_domains(self):
+ self._test_network_api('get_dns_domains', rpc_method='call')
+
+ def test_add_dns_entry(self):
+ self._test_network_api('add_dns_entry', rpc_method='call',
+ address='addr', name='name', dns_type='foo', domain='domain')
+
+ def test_modify_dns_entry(self):
+ self._test_network_api('modify_dns_entry', rpc_method='call',
+ address='addr', name='name', domain='domain')
+
+ def test_delete_dns_entry(self):
+ self._test_network_api('delete_dns_entry', rpc_method='call',
+ name='name', domain='domain')
+
+ def test_delete_dns_domain(self):
+ self._test_network_api('delete_dns_domain', rpc_method='call',
+ domain='fake_domain')
+
+ def test_get_dns_entries_by_address(self):
+ self._test_network_api('get_dns_entries_by_address', rpc_method='call',
+ address='fake_address', domain='fake_domain')
+
+ def test_get_dns_entries_by_name(self):
+ self._test_network_api('get_dns_entries_by_name', rpc_method='call',
+ name='fake_name', domain='fake_domain')
+
+ def test_create_private_dns_domain(self):
+ self._test_network_api('create_private_dns_domain', rpc_method='call',
+ domain='fake_domain', av_zone='fake_zone')
+
+ def test_create_public_dns_domain(self):
+ self._test_network_api('create_public_dns_domain', rpc_method='call',
+ domain='fake_domain', project='fake_project')
+
+ def test_setup_networks_on_host(self):
+ self._test_network_api('setup_networks_on_host', rpc_method='call',
+ instance_id='fake_id', host='fake_host', teardown=False)
+
+ def test_lease_fixed_ip(self):
+ self._test_network_api('lease_fixed_ip', rpc_method='cast',
+ host='fake_host', address='fake_addr')
+
+ def test_release_fixed_ip(self):
+ self._test_network_api('release_fixed_ip', rpc_method='cast',
+ host='fake_host', address='fake_addr')
+
+ def test_set_network_host(self):
+ self._test_network_api('set_network_host', rpc_method='call',
+ network_ref={})
+
+ def test_rpc_setup_network_on_host(self):
+ self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
+ network_id='fake_id', teardown=False, host='fake_host')
+
+ def test_rpc_allocate_fixed_ip(self):
+ self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
+ instance_id='fake_id', network_id='fake_id', address='addr',
+ vpn=True, host='fake_host')
+
+ def test_deallocate_fixed_ip(self):
+ instance = fake_instance.fake_db_instance()
+ self._test_network_api('deallocate_fixed_ip', rpc_method='call',
+ address='fake_addr', host='fake_host', instance=instance,
+ version='1.12')
+
+ def test_update_dns(self):
+ self._test_network_api('update_dns', rpc_method='cast', fanout=True,
+ network_ids='fake_id', version='1.3')
+
+ def test__associate_floating_ip(self):
+ self._test_network_api('_associate_floating_ip', rpc_method='call',
+ floating_address='fake_addr', fixed_address='fixed_address',
+ interface='fake_interface', host='fake_host',
+ instance_uuid='fake_uuid', version='1.6')
+
+ def test__disassociate_floating_ip(self):
+ self._test_network_api('_disassociate_floating_ip', rpc_method='call',
+ address='fake_addr', interface='fake_interface',
+ host='fake_host', instance_uuid='fake_uuid', version='1.6')
+
+ def test_migrate_instance_start(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host=self.DefaultArg(None),
+ version='1.2')
+
+ def test_migrate_instance_start_multi_host(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
+
+ def test_migrate_instance_finish(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host=self.DefaultArg(None),
+ version='1.2')
+
+ def test_migrate_instance_finish_multi_host(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
diff --git a/nova/tests/unit/objects/__init__.py b/nova/tests/unit/objects/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/objects/__init__.py
diff --git a/nova/tests/unit/objects/test_agent.py b/nova/tests/unit/objects/test_agent.py
new file mode 100644
index 0000000000..86be0cd361
--- /dev/null
+++ b/nova/tests/unit/objects/test_agent.py
@@ -0,0 +1,103 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.objects import agent as agent_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_agent = {
+ 'id': 1,
+ 'hypervisor': 'novavm',
+ 'os': 'linux',
+ 'architecture': 'DISC',
+ 'version': '1.0',
+ 'url': 'http://openstack.org/novavm/agents/novavm_agent_v1.0.rpm',
+ 'md5hash': '8cb151f3adc23a92db8ddbe084796823',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+}
+
+
+class _TestAgent(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_get_by_triple(self, mock_get):
+ mock_get.return_value = fake_agent
+ agent = agent_obj.Agent.get_by_triple(self.context,
+ 'novavm', 'linux', 'DISC')
+ self._compare(self, fake_agent, agent)
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_get_by_triple_none(self, mock_get):
+ mock_get.return_value = None
+ agent = agent_obj.Agent.get_by_triple(self.context,
+ 'novavm', 'linux', 'DISC')
+ self.assertIsNone(agent)
+
+ @mock.patch('nova.db.agent_build_create')
+ def test_create(self, mock_create):
+ mock_create.return_value = fake_agent
+ agent = agent_obj.Agent(context=self.context)
+ agent.hypervisor = 'novavm'
+ agent.create()
+ mock_create.assert_called_once_with(self.context,
+ {'hypervisor': 'novavm'})
+ self._compare(self, fake_agent, agent)
+
+ @mock.patch('nova.db.agent_build_create')
+ def test_create_with_id(self, mock_create):
+ agent = agent_obj.Agent(context=self.context, id=123)
+ self.assertRaises(exception.ObjectActionError, agent.create)
+ self.assertFalse(mock_create.called)
+
+ @mock.patch('nova.db.agent_build_destroy')
+ def test_destroy(self, mock_destroy):
+ agent = agent_obj.Agent(context=self.context, id=123)
+ agent.destroy()
+ mock_destroy.assert_called_once_with(self.context, 123)
+
+ @mock.patch('nova.db.agent_build_update')
+ def test_save(self, mock_update):
+ mock_update.return_value = fake_agent
+ agent = agent_obj.Agent(context=self.context, id=123)
+ agent.obj_reset_changes()
+ agent.hypervisor = 'novavm'
+ agent.save()
+ mock_update.assert_called_once_with(self.context, 123,
+ {'hypervisor': 'novavm'})
+
+ @mock.patch('nova.db.agent_build_get_all')
+ def test_get_all(self, mock_get_all):
+ mock_get_all.return_value = [fake_agent]
+ agents = agent_obj.AgentList.get_all(self.context, hypervisor='novavm')
+ self.assertEqual(1, len(agents))
+ self._compare(self, fake_agent, agents[0])
+ mock_get_all.assert_called_once_with(self.context, hypervisor='novavm')
+
+
+class TestAgent(test_objects._LocalTest, _TestAgent):
+ pass
+
+
+class TestAgentRemote(test_objects._RemoteTest, _TestAgent):
+ pass
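
The _TestAgent class above is a plain mix-in with no test base class of its own; the two trailing classes pair it with test_objects._LocalTest and test_objects._RemoteTest so that every test body runs once against direct object calls and once through the remote (serialized) code path. A minimal, self-contained sketch of that mix-in pattern, using stdlib unittest and made-up class names rather than anything from the Nova tree:

    import unittest


    class _SharedChecks(object):
        # Test bodies live in a plain mix-in, so they are not collected
        # on their own; they only run via the concrete classes below.
        def test_answer(self):
            self.assertEqual(42, self.backend())


    class LocalBackend(unittest.TestCase):
        def backend(self):
            return 42


    class RemoteBackend(unittest.TestCase):
        def backend(self):
            # stand-in for "round-trip the value through a serializer"
            return int(str(42))


    class TestLocal(LocalBackend, _SharedChecks):
        pass


    class TestRemote(RemoteBackend, _SharedChecks):
        pass


    if __name__ == '__main__':
        unittest.main()
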
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
new file mode 100644
index 0000000000..67ea514bc7
--- /dev/null
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -0,0 +1,199 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import aggregate
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_aggregate = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'name': 'fake-aggregate',
+ 'hosts': ['foo', 'bar'],
+ 'metadetails': {'this': 'that'},
+ }
+
+SUBS = {'metadata': 'metadetails'}
+
+
+class _TestAggregateObject(object):
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get')
+ db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate.get_by_id(self.context, 123)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'aggregate_create')
+ db.aggregate_create(self.context, {'name': 'foo'},
+ metadata={'one': 'two'}).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'one': 'two'}
+ agg.create(self.context)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'aggregate_create')
+ db.aggregate_create(self.context, {'name': 'foo'},
+ metadata={'one': 'two'}).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'one': 'two'}
+ agg.create(self.context)
+ self.assertRaises(exception.ObjectActionError, agg.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'aggregate_update')
+ db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
+ fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.name = 'baz'
+ agg.save(self.context)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_save_and_create_no_hosts(self):
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo', 'bar']
+ self.assertRaises(exception.ObjectActionError,
+ agg.create, self.context)
+ self.assertRaises(exception.ObjectActionError,
+ agg.save, self.context)
+
+ def test_update_metadata(self):
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
+ db.aggregate_metadata_delete(self.context, 123, 'todelete')
+ db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
+ self.mox.ReplayAll()
+ fake_notifier.NOTIFICATIONS = []
+ agg = aggregate.Aggregate()
+ agg._context = self.context
+ agg.id = 123
+ agg.metadata = {'foo': 'bar'}
+ agg.obj_reset_changes()
+ agg.update_metadata({'todelete': None, 'toadd': 'myval'})
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
+ self.assertEqual({'todelete': None, 'toadd': 'myval'},
+ msg.payload['meta_data'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
+ self.assertEqual({'todelete': None, 'toadd': 'myval'},
+ msg.payload['meta_data'])
+ self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'aggregate_delete')
+ db.aggregate_delete(self.context, 123)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.destroy(self.context)
+
+ def test_add_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_host_add')
+ db.aggregate_host_add(self.context, 123, 'bar'
+ ).AndReturn({'host': 'bar'})
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo']
+ agg._context = self.context
+ agg.add_host('bar')
+ self.assertEqual(agg.hosts, ['foo', 'bar'])
+
+ def test_delete_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_host_delete')
+ db.aggregate_host_delete(self.context, 123, 'foo')
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo', 'bar']
+ agg._context = self.context
+ agg.delete_host('foo')
+ self.assertEqual(agg.hosts, ['bar'])
+
+ def test_availability_zone(self):
+ agg = aggregate.Aggregate()
+ agg.metadata = {'availability_zone': 'foo'}
+ self.assertEqual('foo', agg.availability_zone)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get_all')
+ db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
+ self.mox.ReplayAll()
+ aggs = aggregate.AggregateList.get_all(self.context)
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ def test_by_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
+ db.aggregate_get_by_host(self.context, 'fake-host', key=None,
+ ).AndReturn([fake_aggregate])
+ self.mox.ReplayAll()
+ aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this')
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['baz'])
+ self.assertEqual(0, len(aggs))
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['foo', 'bar'])
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+
+class TestAggregateObject(test_objects._LocalTest,
+ _TestAggregateObject):
+ pass
+
+
+class TestRemoteAggregateObject(test_objects._RemoteTest,
+ _TestAggregateObject):
+ pass
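
Where test_agent.py patches with mock decorators, this file uses the older mox record/replay style: StubOutWithMock() swaps the function out, the chained AndReturn() call records the expected invocation, and ReplayAll() switches mox into playback so the test body can run. For comparison, roughly the same stubbing written with mock.patch.object against a toy object (FakeDB and its method are illustrative stand-ins, not Nova code):

    import mock


    class FakeDB(object):
        def aggregate_get(self, ctxt, aggregate_id):
            raise NotImplementedError  # never reached once patched


    db = FakeDB()

    # The stub only exists inside the with-block, and the expectation is
    # asserted afterwards instead of being recorded up front as mox does.
    with mock.patch.object(db, 'aggregate_get',
                           return_value={'id': 123}) as get:
        result = db.aggregate_get('ctxt', 123)

    get.assert_called_once_with('ctxt', 123)
    assert result == {'id': 123}
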
diff --git a/nova/tests/unit/objects/test_bandwidth_usage.py b/nova/tests/unit/objects/test_bandwidth_usage.py
new file mode 100644
index 0000000000..933e7ff643
--- /dev/null
+++ b/nova/tests/unit/objects/test_bandwidth_usage.py
@@ -0,0 +1,124 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova.objects import bandwidth_usage
+from nova import test
+from nova.tests.unit.objects import test_objects
+
+
+class _TestBandwidthUsage(test.TestCase):
+
+ def setUp(self):
+ super(_TestBandwidthUsage, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ now, start_period = self._time_now_and_start_period()
+ self.expected_bw_usage = self._fake_bw_usage(
+ time=now, start_period=start_period)
+
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ @staticmethod
+ def _fake_bw_usage(time=None, start_period=None, bw_in=100,
+ bw_out=200, last_ctr_in=12345, last_ctr_out=67890):
+ fake_bw_usage = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': 'fake_uuid1',
+ 'mac': 'fake_mac1',
+ 'start_period': start_period,
+ 'bw_in': bw_in,
+ 'bw_out': bw_out,
+ 'last_ctr_in': last_ctr_in,
+ 'last_ctr_out': last_ctr_out,
+ 'last_refreshed': time
+ }
+ return fake_bw_usage
+
+ @staticmethod
+ def _time_now_and_start_period():
+ now = timeutils.utcnow().replace(tzinfo=iso8601.iso8601.Utc(),
+ microsecond=0)
+ start_period = now - datetime.timedelta(seconds=10)
+ return now, start_period
+
+ @mock.patch.object(db, 'bw_usage_get')
+ def test_get_by_instance_uuid_and_mac(self, mock_get):
+ mock_get.return_value = self.expected_bw_usage
+ bw_usage = bandwidth_usage.BandwidthUsage.get_by_instance_uuid_and_mac(
+ self.context, 'fake_uuid', 'fake_mac',
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+ @mock.patch.object(db, 'bw_usage_get_by_uuids')
+ def test_get_by_uuids(self, mock_get_by_uuids):
+ mock_get_by_uuids.return_value = [self.expected_bw_usage]
+
+ bw_usages = bandwidth_usage.BandwidthUsageList.get_by_uuids(
+ self.context, ['fake_uuid'],
+ start_period=self.expected_bw_usage['start_period'])
+ self.assertEqual(len(bw_usages), 1)
+ self._compare(self, self.expected_bw_usage, bw_usages[0])
+
+ @mock.patch.object(db, 'bw_usage_update')
+ def test_create(self, mock_create):
+ mock_create.return_value = self.expected_bw_usage
+
+ bw_usage = bandwidth_usage.BandwidthUsage()
+ bw_usage.create(self.context, 'fake_uuid', 'fake_mac',
+ 100, 200, 12345, 67890,
+ start_period=self.expected_bw_usage['start_period'])
+
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+ @mock.patch.object(db, 'bw_usage_update')
+ def test_update(self, mock_update):
+ expected_bw_usage1 = self._fake_bw_usage(
+ time=self.expected_bw_usage['last_refreshed'],
+ start_period=self.expected_bw_usage['start_period'],
+ last_ctr_in=42, last_ctr_out=42)
+
+ mock_update.side_effect = [expected_bw_usage1, self.expected_bw_usage]
+
+ bw_usage = bandwidth_usage.BandwidthUsage()
+ bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
+ 100, 200, 42, 42,
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, expected_bw_usage1, bw_usage)
+ bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
+ 100, 200, 12345, 67890,
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+
+class TestBandwidthUsageObject(test_objects._LocalTest,
+ _TestBandwidthUsage):
+ pass
+
+
+class TestRemoteBandwidthUsageObject(test_objects._RemoteTest,
+ _TestBandwidthUsage):
+ pass
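
test_update above relies on mock's side_effect accepting a list: each call to the patched db.bw_usage_update returns the next element, so the first create() sees the intermediate counters and the second sees the final row. The behaviour in isolation (only the mock library is assumed):

    import mock

    update = mock.Mock(side_effect=['first row', 'second row'])

    # Successive calls consume successive items from the side_effect list.
    assert update('fake_uuid1', 'fake_mac1') == 'first row'
    assert update('fake_uuid1', 'fake_mac1') == 'second row'
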
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
new file mode 100644
index 0000000000..32bb51fe96
--- /dev/null
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -0,0 +1,333 @@
+# Copyright 2013 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+class _TestBlockDeviceMappingObject(object):
+ def fake_bdm(self, instance=None):
+ instance = instance or {}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 123,
+ 'instance_uuid': instance.get('uuid') or 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1
+ })
+ if instance:
+ fake_bdm['instance'] = instance
+ return fake_bdm
+
+ def _test_save(self, cell_type=None):
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+
+ fake_bdm = self.fake_bdm()
+ with contextlib.nested(
+ mock.patch.object(
+ db, 'block_device_mapping_update', return_value=fake_bdm),
+ mock.patch.object(
+ cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top')
+ ) as (bdm_update_mock, cells_update_mock):
+ bdm_object = objects.BlockDeviceMapping()
+ bdm_object.id = 123
+ bdm_object.volume_id = 'fake_volume_id'
+ bdm_object.save(self.context)
+
+ bdm_update_mock.assert_called_once_with(
+ self.context, 123, {'volume_id': 'fake_volume_id'},
+ legacy=False)
+ if cell_type != 'compute':
+ self.assertFalse(cells_update_mock.called)
+ else:
+ self.assertEqual(1, cells_update_mock.call_count)
+ self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
+ self.assertIsInstance(cells_update_mock.call_args[0][1],
+ block_device_obj.BlockDeviceMapping)
+ self.assertEqual(cells_update_mock.call_args[1], {})
+
+ def test_save_nocells(self):
+ self._test_save()
+
+ def test_save_apicell(self):
+ self._test_save(cell_type='api')
+
+ def test_save_computecell(self):
+ self._test_save(cell_type='compute')
+
+ def test_save_instance_changed(self):
+ bdm_object = objects.BlockDeviceMapping()
+ bdm_object.instance = objects.Instance()
+ self.assertRaises(exception.ObjectActionError,
+ bdm_object.save, self.context)
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id(self, get_by_vol_id):
+ get_by_vol_id.return_value = self.fake_bdm()
+
+ vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 'fake-volume-id')
+ for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
+ self.assertFalse(vol_bdm.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id_not_found(self, get_by_vol_id):
+ get_by_vol_id.return_value = None
+
+ self.assertRaises(exception.VolumeBDMNotFound,
+ objects.BlockDeviceMapping.get_by_volume_id,
+ self.context, 'fake-volume-id')
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_instance_uuid_missmatch(self, get_by_vol_id):
+ fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
+ get_by_vol_id.return_value = fake_bdm_vol
+
+ self.assertRaises(exception.InvalidVolume,
+ objects.BlockDeviceMapping.get_by_volume_id,
+ self.context, 'fake-volume-id',
+ instance_uuid='fake-instance')
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id_with_expected(self, get_by_vol_id):
+ get_by_vol_id.return_value = self.fake_bdm(
+ fake_instance.fake_db_instance())
+
+ vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 'fake-volume-id', expected_attrs=['instance'])
+ for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
+ self.assertTrue(vol_bdm.obj_attr_is_set(attr))
+ get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
+ ['instance'])
+ self.assertRemotes()
+
+ def _test_create_mocked(self, cell_type=None):
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
+
+ with contextlib.nested(
+ mock.patch.object(
+ db, 'block_device_mapping_create', return_value=fake_bdm),
+ mock.patch.object(cells_rpcapi.CellsAPI,
+ 'bdm_update_or_create_at_top')
+ ) as (bdm_create_mock, cells_update_mock):
+ bdm = objects.BlockDeviceMapping(**values)
+
+ if cell_type == 'api':
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+ elif cell_type == 'compute':
+ bdm.create(self.context)
+ bdm_create_mock.assert_called_once_with(
+ self.context, values, legacy=False)
+ self.assertEqual(1, cells_update_mock.call_count)
+ self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
+ self.assertIsInstance(cells_update_mock.call_args[0][1],
+ block_device_obj.BlockDeviceMapping)
+ self.assertEqual(cells_update_mock.call_args[1],
+ {'create': True})
+ else:
+ bdm.create(self.context)
+ self.assertFalse(cells_update_mock.called)
+ bdm_create_mock.assert_called_once_with(
+ self.context, values, legacy=False)
+
+ def test_create_nocells(self):
+ self._test_create_mocked()
+
+ def test_create_apicell(self):
+ self._test_create_mocked(cell_type='api')
+
+ def test_create_computecell(self):
+ self._test_create_mocked(cell_type='compute')
+
+ def test_create(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ bdm = objects.BlockDeviceMapping(**values)
+ with mock.patch.object(cells_rpcapi.CellsAPI,
+ 'bdm_update_or_create_at_top'):
+ bdm.create(self.context)
+
+ for k, v in values.iteritems():
+ self.assertEqual(v, getattr(bdm, k))
+
+ def test_create_fails(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ bdm = objects.BlockDeviceMapping(**values)
+ bdm.create(self.context)
+
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+
+ def test_create_fails_instance(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance',
+ 'instance': objects.Instance()}
+ bdm = objects.BlockDeviceMapping(**values)
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+
+ def _test_destroy_mocked(self, cell_type=None):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume', 'id': 1,
+ 'instance_uuid': 'fake-instance', 'device_name': 'fake'}
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+ with contextlib.nested(
+ mock.patch.object(db, 'block_device_mapping_destroy'),
+ mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top')
+ ) as (bdm_del, cells_destroy):
+ bdm = objects.BlockDeviceMapping(**values)
+ bdm.destroy(self.context)
+ bdm_del.assert_called_once_with(self.context, values['id'])
+ if cell_type != 'compute':
+ self.assertFalse(cells_destroy.called)
+ else:
+ cells_destroy.assert_called_once_with(
+ self.context, values['instance_uuid'],
+ device_name=values['device_name'],
+ volume_id=values['volume_id'])
+
+ def test_destroy_nocells(self):
+ self._test_destroy_mocked()
+
+ def test_destroy_apicell(self):
+ self._test_destroy_mocked(cell_type='api')
+
+ def test_destroy_computecell(self):
+ self._test_destroy_mocked(cell_type='compute')
+
+
+class TestBlockDeviceMappingObject(test_objects._LocalTest,
+ _TestBlockDeviceMappingObject):
+ pass
+
+
+class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest,
+ _TestBlockDeviceMappingObject):
+ pass
+
+
+class _TestBlockDeviceMappingListObject(object):
+ def fake_bdm(self, bdm_id):
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': bdm_id, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1,
+ })
+ return fake_bdm
+
+ @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
+ def test_get_by_instance_uuid(self, get_all_by_inst):
+ fakes = [self.fake_bdm(123), self.fake_bdm(456)]
+ get_all_by_inst.return_value = fakes
+ bdm_list = (
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, 'fake_instance_uuid'))
+ for faked, got in zip(fakes, bdm_list):
+ self.assertIsInstance(got, objects.BlockDeviceMapping)
+ self.assertEqual(faked['id'], got.id)
+
+ @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
+ def test_get_by_instance_uuid_no_result(self, get_all_by_inst):
+ get_all_by_inst.return_value = None
+ bdm_list = (
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, 'fake_instance_uuid'))
+ self.assertEqual(0, len(bdm_list))
+
+ def test_root_volume_metadata(self):
+ fake_volume = {
+ 'volume_image_metadata': {'vol_test_key': 'vol_test_value'}}
+
+ class FakeVolumeApi(object):
+ def get(*args, **kwargs):
+ return fake_volume
+
+ block_device_mapping = block_device_obj.block_device_make_list(None, [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake_volume_id',
+ 'delete_on_termination': False})])
+
+ volume_meta = block_device_mapping.root_metadata(
+ self.context, None, FakeVolumeApi())
+ self.assertEqual(fake_volume['volume_image_metadata'], volume_meta)
+
+ def test_root_image_metadata(self):
+ fake_image = {'properties': {'img_test_key': 'img_test_value'}}
+
+ class FakeImageApi(object):
+ def show(*args, **kwargs):
+ return fake_image
+
+ block_device_mapping = block_device_obj.block_device_make_list(None, [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'boot_index': 0,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': "fake-image",
+ 'delete_on_termination': True})])
+
+ image_meta = block_device_mapping.root_metadata(
+ self.context, FakeImageApi(), None)
+ self.assertEqual(fake_image['properties'], image_meta)
+
+
+class TestBlockDeviceMappingListObject(test_objects._LocalTest,
+ _TestBlockDeviceMappingListObject):
+ pass
+
+
+class TestRemoteBlockDeviceMappingListObject(
+ test_objects._RemoteTest, _TestBlockDeviceMappingListObject):
+ pass
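
contextlib.nested(), used in _test_save, _test_create_mocked and _test_destroy_mocked above, exists only on Python 2 (it was removed in Python 3), which is what these tests targeted at the time. The same stacked patching can be written portably with a single with statement; a sketch against toy objects (FakeDB and FakeCellsAPI are stand-ins, not the Nova modules):

    import mock


    class FakeDB(object):
        def block_device_mapping_update(self, *args, **kwargs):
            pass


    class FakeCellsAPI(object):
        def bdm_update_or_create_at_top(self, *args, **kwargs):
            pass


    db = FakeDB()
    cells = FakeCellsAPI()

    # One with statement holding both patches replaces contextlib.nested().
    with mock.patch.object(db, 'block_device_mapping_update') as bdm_update, \
            mock.patch.object(cells, 'bdm_update_or_create_at_top') as cells_up:
        db.block_device_mapping_update('ctxt', 123, {'volume_id': 'vol'})

    bdm_update.assert_called_once_with('ctxt', 123, {'volume_id': 'vol'})
    assert not cells_up.called
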
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
new file mode 100644
index 0000000000..0bbf8050c8
--- /dev/null
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -0,0 +1,240 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import compute_node
+from nova.objects import hv_spec
+from nova.objects import service
+from nova.tests.unit.objects import test_objects
+from nova.virt import hardware
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_stats = {'num_foo': '10'}
+fake_stats_db_format = jsonutils.dumps(fake_stats)
+# host_ip is coerced from a string to an IPAddress
+# but needs to be converted to a string for the database format
+fake_host_ip = '127.0.0.1'
+fake_numa_topology = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)])
+fake_numa_topology_db_format = fake_numa_topology.to_json()
+fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
+fake_supported_hv_specs = [fake_hv_spec]
+# for backward compatibility, each supported instance object
+# is stored as a list in the database
+fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
+fake_compute_node = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'service_id': 456,
+ 'vcpus': 4,
+ 'memory_mb': 4096,
+ 'local_gb': 1024,
+ 'vcpus_used': 2,
+ 'memory_mb_used': 2048,
+ 'local_gb_used': 512,
+ 'hypervisor_type': 'Hyper-Dan-VM-ware',
+ 'hypervisor_version': 1001,
+ 'hypervisor_hostname': 'vm.danplanet.com',
+ 'free_ram_mb': 1024,
+ 'free_disk_gb': 256,
+ 'current_workload': 100,
+ 'running_vms': 2013,
+ 'cpu_info': 'Schmintel i786',
+ 'disk_available_least': 256,
+ 'metrics': '',
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'numa_topology': fake_numa_topology_db_format,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }
+
+
+class _TestComputeNodeObject(object):
+ def supported_hv_specs_comparator(self, expected, obj_val):
+ obj_val = [inst.to_list() for inst in obj_val]
+ self.json_comparator(expected, obj_val)
+
+ def comparators(self):
+ return {'stats': self.json_comparator,
+ 'host_ip': self.str_comparator,
+ 'supported_hv_specs': self.supported_hv_specs_comparator}
+
+ def subs(self):
+ return {'supported_hv_specs': 'supported_instances'}
+
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get')
+ db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode.get_by_id(self.context, 123)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_get_by_service_id(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ db.compute_node_get_by_service_id(self.context, 456).AndReturn(
+ fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'compute_node_create')
+ db.compute_node_create(
+ self.context,
+ {
+ 'service_id': 456,
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.service_id = 456
+ compute.stats = fake_stats
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
+ compute.host_ip = fake_host_ip
+ compute.supported_hv_specs = fake_supported_hv_specs
+ compute.create(self.context)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'compute_node_create')
+ db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
+ fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.service_id = 456
+ compute.create(self.context)
+ self.assertRaises(exception.ObjectActionError, compute.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'compute_node_update')
+ db.compute_node_update(
+ self.context, 123,
+ {
+ 'vcpus_used': 3,
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.id = 123
+ compute.vcpus_used = 3
+ compute.stats = fake_stats
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
+ compute.host_ip = fake_host_ip
+ compute.supported_hv_specs = fake_supported_hv_specs
+ compute.save(self.context)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ @mock.patch.object(db, 'compute_node_create',
+ return_value=fake_compute_node)
+ def test_set_id_failure(self, db_mock):
+ compute = compute_node.ComputeNode()
+ compute.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ compute, 'id', 124)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'compute_node_delete')
+ db.compute_node_delete(self.context, 123)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.id = 123
+ compute.destroy(self.context)
+
+ def test_service(self):
+ self.mox.StubOutWithMock(service.Service, 'get_by_id')
+ service.Service.get_by_id(self.context, 456).AndReturn('my-service')
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute._context = self.context
+ compute.id = 123
+ compute.service_id = 456
+ self.assertEqual('my-service', compute.service)
+ # Make sure it doesn't call Service.get_by_id() again
+ self.assertEqual('my-service', compute.service)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
+ self.mox.ReplayAll()
+ computes = compute_node.ComputeNodeList.get_all(self.context)
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_get_by_hypervisor(self):
+ self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
+ db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
+ [fake_compute_node])
+ self.mox.ReplayAll()
+ computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
+ 'hyper')
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ @mock.patch('nova.db.service_get')
+ def test_get_by_service(self, service_get):
+ service_get.return_value = {'compute_node': [fake_compute_node]}
+ fake_service = service.Service(id=123)
+ computes = compute_node.ComputeNodeList.get_by_service(self.context,
+ fake_service)
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_compat_numa_topology(self):
+ compute = compute_node.ComputeNode()
+ primitive = compute.obj_to_primitive(target_version='1.4')
+ self.assertNotIn('numa_topology', primitive)
+
+ def test_compat_supported_hv_specs(self):
+ compute = compute_node.ComputeNode()
+ compute.supported_hv_specs = fake_supported_hv_specs
+ primitive = compute.obj_to_primitive(target_version='1.5')
+ self.assertNotIn('supported_hv_specs', primitive)
+
+
+class TestComputeNodeObject(test_objects._LocalTest,
+ _TestComputeNodeObject):
+ pass
+
+
+class TestRemoteComputeNodeObject(test_objects._RemoteTest,
+ _TestComputeNodeObject):
+ pass
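
test_compat_numa_topology and test_compat_supported_hv_specs check that serializing a ComputeNode for an older object version drops fields that did not exist at that version (numa_topology before 1.5 and supported_hv_specs before 1.6, going by the assertions). A toy illustration of the idea only, with a made-up field/version table rather than Nova's actual obj_to_primitive() machinery:

    # Hypothetical mapping of field name to the version that introduced it.
    FIELD_SINCE = {
        'vcpus': (1, 0),
        'numa_topology': (1, 5),
        'supported_hv_specs': (1, 6),
    }


    def to_primitive(data, target_version):
        """Keep only the fields that already existed at target_version."""
        target = tuple(int(part) for part in target_version.split('.'))
        return {k: v for k, v in data.items() if FIELD_SINCE[k] <= target}


    node = {'vcpus': 4, 'numa_topology': 'blob', 'supported_hv_specs': []}
    assert 'numa_topology' not in to_primitive(node, '1.4')
    assert 'supported_hv_specs' not in to_primitive(node, '1.5')
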
diff --git a/nova/tests/unit/objects/test_dns_domain.py b/nova/tests/unit/objects/test_dns_domain.py
new file mode 100644
index 0000000000..45f42ff237
--- /dev/null
+++ b/nova/tests/unit/objects/test_dns_domain.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import dns_domain
+from nova.tests.unit.objects import test_objects
+
+
+fake_dnsd = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'domain': 'blah.example.com',
+ 'scope': 'private',
+ 'availability_zone': 'overthere',
+ 'project_id': '867530niner',
+}
+
+
+class _TestDNSDomain(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_domain(self):
+ with mock.patch.object(db, 'dnsdomain_get') as get:
+ get.return_value = fake_dnsd
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self._compare(self, fake_dnsd, dnsd)
+
+ def test_register_for_zone(self):
+ dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
+ 'domain', 'zone')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('zone', dnsd.availability_zone)
+
+ def test_register_for_project(self):
+ dns_domain.DNSDomain.register_for_project(self.context.elevated(),
+ 'domain', 'project')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('project', dnsd.project_id)
+
+ def test_delete_by_domain(self):
+ dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
+ 'domain', 'zone')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('zone', dnsd.availability_zone)
+
+ dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
+ 'domain')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertIsNone(dnsd)
+
+ def test_get_all(self):
+ with mock.patch.object(db, 'dnsdomain_get_all') as get:
+ get.return_value = [fake_dnsd]
+ dns_domain.DNSDomainList.get_all(self.context)
+
+
+class TestDNSDomainObject(test_objects._LocalTest,
+ _TestDNSDomain):
+ pass
+
+
+class TestRemoteDNSDomainObject(test_objects._RemoteTest,
+ _TestDNSDomain):
+ pass
diff --git a/nova/tests/unit/objects/test_ec2.py b/nova/tests/unit/objects/test_ec2.py
new file mode 100644
index 0000000000..cc79cb1e49
--- /dev/null
+++ b/nova/tests/unit/objects/test_ec2.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import ec2 as ec2_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_map = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'uuid': 'fake-uuid-2',
+}
+
+
+class _TestEC2InstanceMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ imap = ec2_obj.EC2InstanceMapping()
+ imap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_instance_create') as create:
+ create.return_value = fake_map
+ imap.create(self.context)
+
+ self.assertEqual(self.context, imap._context)
+ imap._context = None
+ self._compare(self, fake_map, imap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_instance_get_by_uuid') as get:
+ get.return_value = fake_map
+ imap = ec2_obj.EC2InstanceMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, imap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_instance_get_by_id') as get:
+ get.return_value = fake_map
+ imap = ec2_obj.EC2InstanceMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, imap)
+
+
+class TestEC2InstanceMapping(test_objects._LocalTest, _TestEC2InstanceMapping):
+ pass
+
+
+class TestRemoteEC2InstanceMapping(test_objects._RemoteTest,
+ _TestEC2InstanceMapping):
+ pass
+
+
+class _TestEC2VolumeMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ vmap = ec2_obj.EC2VolumeMapping()
+ vmap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_volume_create') as create:
+ create.return_value = fake_map
+ vmap.create(self.context)
+
+ self.assertEqual(self.context, vmap._context)
+ vmap._context = None
+ self._compare(self, fake_map, vmap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_volume_get_by_uuid') as get:
+ get.return_value = fake_map
+ vmap = ec2_obj.EC2VolumeMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, vmap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_volume_get_by_id') as get:
+ get.return_value = fake_map
+ vmap = ec2_obj.EC2VolumeMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, vmap)
+
+
+class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping):
+ pass
+
+
+class TestRemoteEC2VolumeMapping(test_objects._RemoteTest,
+ _TestEC2VolumeMapping):
+ pass
+
+
+class _TestEC2SnapshotMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ smap = ec2_obj.EC2SnapshotMapping()
+ smap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_snapshot_create') as create:
+ create.return_value = fake_map
+ smap.create(self.context)
+
+ self.assertEqual(self.context, smap._context)
+ smap._context = None
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, smap)
+
+
+class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping):
+ pass
+
+
+class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest,
+ _TestEC2SnapshotMapping):
+ pass
+
+
+class _TestS3ImageMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ s3imap = ec2_obj.S3ImageMapping()
+ s3imap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 's3_image_create') as create:
+ create.return_value = fake_map
+ s3imap.create(self.context)
+
+ self.assertEqual(self.context, s3imap._context)
+ s3imap._context = None
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 's3_image_get_by_uuid') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_s3_id(self):
+ with mock.patch.object(db, 's3_image_get') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, s3imap)
+
+
+class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping):
+ pass
+
+
+class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping):
+ pass
diff --git a/nova/tests/unit/objects/test_external_event.py b/nova/tests/unit/objects/test_external_event.py
new file mode 100644
index 0000000000..c3e319243f
--- /dev/null
+++ b/nova/tests/unit/objects/test_external_event.py
@@ -0,0 +1,46 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.objects import external_event as external_event_obj
+from nova.tests.unit.objects import test_objects
+
+
+class _TestInstanceExternalEventObject(object):
+ def test_make_key(self):
+ key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
+ self.assertEqual('foo-bar', key)
+
+ def test_make_key_no_tag(self):
+ key = external_event_obj.InstanceExternalEvent.make_key('foo')
+ self.assertEqual('foo', key)
+
+ def test_key(self):
+ event = external_event_obj.InstanceExternalEvent(name='foo',
+ tag='bar')
+ with mock.patch.object(event, 'make_key') as make_key:
+ make_key.return_value = 'key'
+ self.assertEqual('key', event.key)
+ make_key.assert_called_once_with('foo', 'bar')
+
+
+class TestInstanceExternalEventObject(test_objects._LocalTest,
+ _TestInstanceExternalEventObject):
+ pass
+
+
+class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
+ _TestInstanceExternalEventObject):
+ pass
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
new file mode 100644
index 0000000000..806d97773a
--- /dev/null
+++ b/nova/tests/unit/objects/test_fields.py
@@ -0,0 +1,393 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import netaddr
+from oslo.utils import timeutils
+
+from nova.network import model as network_model
+from nova.objects import base as obj_base
+from nova.objects import fields
+from nova import test
+
+
+class FakeFieldType(fields.FieldType):
+ def coerce(self, obj, attr, value):
+ return '*%s*' % value
+
+ def to_primitive(self, obj, attr, value):
+ return '!%s!' % value
+
+ def from_primitive(self, obj, attr, value):
+ return value[1:-1]
+
+
+class TestField(test.NoDBTestCase):
+ def setUp(self):
+ super(TestField, self).setUp()
+ self.field = fields.Field(FakeFieldType())
+ self.coerce_good_values = [('foo', '*foo*')]
+ self.coerce_bad_values = []
+ self.to_primitive_values = [('foo', '!foo!')]
+ self.from_primitive_values = [('!foo!', 'foo')]
+
+ def test_coerce_good_values(self):
+ for in_val, out_val in self.coerce_good_values:
+ self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val))
+
+ def test_coerce_bad_values(self):
+ for in_val in self.coerce_bad_values:
+ self.assertRaises((TypeError, ValueError),
+ self.field.coerce, 'obj', 'attr', in_val)
+
+ def test_to_primitive(self):
+ for in_val, prim_val in self.to_primitive_values:
+ self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr',
+ in_val))
+
+ def test_from_primitive(self):
+ class ObjectLikeThing:
+ _context = 'context'
+
+ for prim_val, out_val in self.from_primitive_values:
+ self.assertEqual(out_val, self.field.from_primitive(
+ ObjectLikeThing, 'attr', prim_val))
+
+ def test_stringify(self):
+ self.assertEqual('123', self.field.stringify(123))
+
+
+class TestString(TestField):
+ def setUp(self):
+ super(TestField, self).setUp()
+ self.field = fields.StringField()
+ self.coerce_good_values = [('foo', 'foo'), (1, '1'), (1L, '1'),
+ (True, 'True')]
+ self.coerce_bad_values = [None]
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'123'", self.field.stringify(123))
+
+
+class TestInteger(TestField):
+ def setUp(self):
+ super(TestField, self).setUp()
+ self.field = fields.IntegerField()
+ self.coerce_good_values = [(1, 1), ('1', 1)]
+ self.coerce_bad_values = ['foo', None]
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+
+class TestFloat(TestField):
+ def setUp(self):
+ super(TestFloat, self).setUp()
+ self.field = fields.FloatField()
+ self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)]
+ self.coerce_bad_values = ['foo', None]
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+
+class TestBoolean(TestField):
+ def setUp(self):
+ super(TestField, self).setUp()
+ self.field = fields.BooleanField()
+ self.coerce_good_values = [(True, True), (False, False), (1, True),
+ ('foo', True), (0, False), ('', False)]
+ self.coerce_bad_values = []
+ self.to_primitive_values = self.coerce_good_values[0:2]
+ self.from_primitive_values = self.coerce_good_values[0:2]
+
+
+class TestDateTime(TestField):
+ def setUp(self):
+ super(TestDateTime, self).setUp()
+ self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc())
+ self.field = fields.DateTimeField()
+ self.coerce_good_values = [(self.dt, self.dt),
+ (timeutils.isotime(self.dt), self.dt)]
+ self.coerce_bad_values = [1, 'foo']
+ self.to_primitive_values = [(self.dt, timeutils.isotime(self.dt))]
+ self.from_primitive_values = [(timeutils.isotime(self.dt), self.dt)]
+
+ def test_stringify(self):
+ self.assertEqual(
+ '1955-11-05T18:00:00Z',
+ self.field.stringify(
+ datetime.datetime(1955, 11, 5, 18, 0, 0,
+ tzinfo=iso8601.iso8601.Utc())))
+
+
+class TestIPAddress(TestField):
+ def setUp(self):
+ super(TestIPAddress, self).setUp()
+ self.field = fields.IPAddressField()
+ self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
+ ('::1', netaddr.IPAddress('::1')),
+ (netaddr.IPAddress('::1'),
+ netaddr.IPAddress('::1'))]
+ self.coerce_bad_values = ['1-2', 'foo']
+ self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4'),
+ (netaddr.IPAddress('::1'), '::1')]
+ self.from_primitive_values = [('1.2.3.4',
+ netaddr.IPAddress('1.2.3.4')),
+ ('::1',
+ netaddr.IPAddress('::1'))]
+
+
+class TestIPAddressV4(TestField):
+ def setUp(self):
+ super(TestIPAddressV4, self).setUp()
+ self.field = fields.IPV4AddressField()
+ self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
+ (netaddr.IPAddress('1.2.3.4'),
+ netaddr.IPAddress('1.2.3.4'))]
+ self.coerce_bad_values = ['1-2', 'foo', '::1']
+ self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4')]
+ self.from_primitive_values = [('1.2.3.4',
+ netaddr.IPAddress('1.2.3.4'))]
+
+
+class TestIPAddressV6(TestField):
+ def setUp(self):
+ super(TestIPAddressV6, self).setUp()
+ self.field = fields.IPV6AddressField()
+ self.coerce_good_values = [('::1', netaddr.IPAddress('::1')),
+ (netaddr.IPAddress('::1'),
+ netaddr.IPAddress('::1'))]
+ self.coerce_bad_values = ['1.2', 'foo', '1.2.3.4']
+ self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1')]
+ self.from_primitive_values = [('::1',
+ netaddr.IPAddress('::1'))]
+
+
+class TestDict(TestField):
+ def setUp(self):
+ super(TestDict, self).setUp()
+ self.field = fields.Field(fields.Dict(FakeFieldType()))
+ self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}),
+ ({'foo': 1}, {'foo': '*1*'})]
+ self.coerce_bad_values = [{1: 'bar'}, 'foo']
+ self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})]
+ self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})]
+
+ def test_stringify(self):
+ self.assertEqual("{key=val}", self.field.stringify({'key': 'val'}))
+
+
+class TestDictOfStrings(TestField):
+ def setUp(self):
+ super(TestDictOfStrings, self).setUp()
+ self.field = fields.DictOfStringsField()
+ self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
+ ({'foo': 1}, {'foo': '1'})]
+ self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
+ self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
+ self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
+
+ def test_stringify(self):
+ self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'}))
+
+
+class TestDictOfStringsNone(TestField):
+ def setUp(self):
+ super(TestDictOfStringsNone, self).setUp()
+ self.field = fields.DictOfNullableStringsField()
+ self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
+ ({'foo': 1}, {'foo': '1'}),
+ ({'foo': None}, {'foo': None})]
+ self.coerce_bad_values = [{1: 'bar'}, 'foo']
+ self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
+ self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
+
+ def test_stringify(self):
+ self.assertEqual("{k2=None,key='val'}",
+ self.field.stringify({'k2': None,
+ 'key': 'val'}))
+
+
+class TestListOfDictOfNullableStringsField(TestField):
+ def setUp(self):
+ super(TestListOfDictOfNullableStringsField, self).setUp()
+ self.field = fields.ListOfDictOfNullableStringsField()
+ self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}],
+ [{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]),
+ ([{'f': 1}, {'f1': 'b1'}],
+ [{'f': '1'}, {'f1': 'b1'}]),
+ ([{'foo': None}], [{'foo': None}])]
+ self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']]
+ self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}],
+ [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])]
+ self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'},
+ {'f2': None}],
+ [{'f': 'b'}, {'f1': 'b1'},
+ {'f2': None}])]
+
+ def test_stringify(self):
+ self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]",
+ self.field.stringify(
+ [{'f': None, 'f1': 'b1'}, {'f2': 'b2'}]))
+
+
+class TestList(TestField):
+ def setUp(self):
+ super(TestList, self).setUp()
+ self.field = fields.Field(fields.List(FakeFieldType()))
+ self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])]
+ self.coerce_bad_values = ['foo']
+ self.to_primitive_values = [(['foo'], ['!foo!'])]
+ self.from_primitive_values = [(['!foo!'], ['foo'])]
+
+ def test_stringify(self):
+ self.assertEqual('[123]', self.field.stringify([123]))
+
+
+class TestListOfStrings(TestField):
+ def setUp(self):
+ super(TestListOfStrings, self).setUp()
+ self.field = fields.ListOfStringsField()
+ self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])]
+ self.coerce_bad_values = ['foo']
+ self.to_primitive_values = [(['foo'], ['foo'])]
+ self.from_primitive_values = [(['foo'], ['foo'])]
+
+ def test_stringify(self):
+ self.assertEqual("['abc']", self.field.stringify(['abc']))
+
+
+class TestSet(TestField):
+ def setUp(self):
+ super(TestSet, self).setUp()
+ self.field = fields.Field(fields.Set(FakeFieldType()))
+ self.coerce_good_values = [(set(['foo', 'bar']),
+ set(['*foo*', '*bar*']))]
+ self.coerce_bad_values = [['foo'], {'foo': 'bar'}]
+ self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))]
+ self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))]
+
+ def test_stringify(self):
+ self.assertEqual('set([123])', self.field.stringify(set([123])))
+
+
+class TestSetOfIntegers(TestField):
+ def setUp(self):
+ super(TestSetOfIntegers, self).setUp()
+ self.field = fields.SetOfIntegersField()
+ self.coerce_good_values = [(set(['1', 2]),
+ set([1, 2]))]
+ self.coerce_bad_values = [set(['foo'])]
+ self.to_primitive_values = [(set([1]), tuple([1]))]
+ self.from_primitive_values = [(tuple([1]), set([1]))]
+
+ def test_stringify(self):
+ self.assertEqual('set([1,2])', self.field.stringify(set([1, 2])))
+
+
+class TestObject(TestField):
+ def setUp(self):
+ super(TestObject, self).setUp()
+
+ class TestableObject(obj_base.NovaObject):
+ fields = {
+ 'uuid': fields.StringField(),
+ }
+
+ def __eq__(self, value):
+ # NOTE(danms): Be rather lax about this equality thing to
+ # satisfy the assertEqual() in test_from_primitive(). We
+ # just want to make sure the right type of object is re-created
+ return value.__class__.__name__ == TestableObject.__name__
+
+ class OtherTestableObject(obj_base.NovaObject):
+ pass
+
+ test_inst = TestableObject()
+ self._test_cls = TestableObject
+ self.field = fields.Field(fields.Object('TestableObject'))
+ self.coerce_good_values = [(test_inst, test_inst)]
+ self.coerce_bad_values = [OtherTestableObject(), 1, 'foo']
+ self.to_primitive_values = [(test_inst, test_inst.obj_to_primitive())]
+ self.from_primitive_values = [(test_inst.obj_to_primitive(),
+ test_inst),
+ (test_inst, test_inst)]
+
+ def test_stringify(self):
+ obj = self._test_cls(uuid='fake-uuid')
+ self.assertEqual('TestableObject(fake-uuid)',
+ self.field.stringify(obj))
+
+
+class TestNetworkModel(TestField):
+ def setUp(self):
+ super(TestNetworkModel, self).setUp()
+ model = network_model.NetworkInfo()
+ self.field = fields.Field(fields.NetworkModel())
+ self.coerce_good_values = [(model, model), (model.json(), model)]
+ self.coerce_bad_values = [[], 'foo']
+ self.to_primitive_values = [(model, model.json())]
+ self.from_primitive_values = [(model.json(), model)]
+
+ def test_stringify(self):
+ networkinfo = network_model.NetworkInfo()
+ networkinfo.append(network_model.VIF(id=123))
+ networkinfo.append(network_model.VIF(id=456))
+ self.assertEqual('NetworkModel(123,456)',
+ self.field.stringify(networkinfo))
+
+
+class TestIPNetwork(TestField):
+ def setUp(self):
+ super(TestIPNetwork, self).setUp()
+ self.field = fields.Field(fields.IPNetwork())
+ good = ['192.168.1.0/24', '0.0.0.0/0', '::1/128', '::1/64', '::1/0']
+ self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
+ self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
+ '::1/129', '192.168.0.0/-1']
+ self.to_primitive_values = [(netaddr.IPNetwork(x), x)
+ for x in good]
+ self.from_primitive_values = [(x, netaddr.IPNetwork(x))
+ for x in good]
+
+
+class TestIPV4Network(TestField):
+ def setUp(self):
+ super(TestIPV4Network, self).setUp()
+ self.field = fields.Field(fields.IPV4Network())
+ good = ['192.168.1.0/24', '0.0.0.0/0']
+ self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
+ self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
+ '::1/129', '192.168.0.0/-1']
+ self.to_primitive_values = [(netaddr.IPNetwork(x), x)
+ for x in good]
+ self.from_primitive_values = [(x, netaddr.IPNetwork(x))
+ for x in good]
+
+
+class TestIPV6Network(TestField):
+ def setUp(self):
+ super(TestIPV6Network, self).setUp()
+ self.field = fields.Field(fields.IPV6Network())
+ good = ['::1/128', '::1/64', '::1/0']
+ self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
+ self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
+ '::1/129', '192.168.0.0/-1']
+ self.to_primitive_values = [(netaddr.IPNetwork(x), x)
+ for x in good]
+ self.from_primitive_values = [(x, netaddr.IPNetwork(x))
+ for x in good]
diff --git a/nova/tests/unit/objects/test_fixed_ip.py b/nova/tests/unit/objects/test_fixed_ip.py
new file mode 100644
index 0000000000..116827416d
--- /dev/null
+++ b/nova/tests/unit/objects/test_fixed_ip.py
@@ -0,0 +1,339 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+import netaddr
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.objects import fixed_ip
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_objects
+
+
+fake_fixed_ip = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'address': '192.168.1.100',
+ 'network_id': None,
+ 'virtual_interface_id': None,
+ 'instance_uuid': None,
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'network': None,
+ 'virtual_interface': None,
+ 'floating_ips': [],
+ }
+
+
+class _TestFixedIPObject(object):
+ def _compare(self, obj, db_obj):
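+        # Relationship fields in FIXED_IP_OPTIONAL_ATTRS are compared by
+        # uuid only; default_route and floating_ips are skipped because
+        # they do not round-trip through these fixtures.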
+ for field in obj.fields:
+ if field in ('default_route', 'floating_ips'):
+ continue
+ if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
+ if obj.obj_attr_is_set(field) and db_obj[field] is not None:
+ obj_val = obj[field].uuid
+ db_val = db_obj[field]['uuid']
+ else:
+ continue
+ else:
+ obj_val = obj[field]
+ db_val = db_obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ def test_get_by_id(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
+ get.assert_called_once_with(self.context, 123, get_network=False)
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.network_get')
+ def test_get_by_id_with_extras(self, network_get, fixed_get):
+ db_fixed = dict(fake_fixed_ip,
+ network=test_network.fake_network)
+ fixed_get.return_value = db_fixed
+ fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
+ expected_attrs=['network'])
+ fixed_get.assert_called_once_with(self.context, 123, get_network=True)
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertFalse(network_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ def test_get_by_address(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=[])
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get')
+ def test_get_by_address_with_extras(self, instance_get, network_get,
+ fixed_get):
+ db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
+ instance=fake_instance.fake_db_instance())
+ fixed_get.return_value = db_fixed
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
+ expected_attrs=['network',
+ 'instance'])
+ fixed_get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=['network',
+ 'instance'])
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
+ self.assertFalse(network_get.called)
+ self.assertFalse(instance_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get')
+ def test_get_by_address_with_extras_deleted_instance(self, instance_get,
+ network_get,
+ fixed_get):
+ db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
+ instance=None)
+ fixed_get.return_value = db_fixed
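+        # A deleted instance is joined as None; the object should expose
+        # instance=None rather than attempting a lazy load.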
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
+ expected_attrs=['network',
+ 'instance'])
+ fixed_get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=['network',
+ 'instance'])
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertIsNone(fixedip.instance)
+ self.assertFalse(network_get.called)
+ self.assertFalse(instance_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_floating_address')
+ def test_get_by_floating_address(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_floating_address')
+ def test_get_by_floating_address_none(self, get):
+ get.return_value = None
+ fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self.assertIsNone(fixedip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_network_host')
+ def test_get_by_network_and_host(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
+ 123, 'host')
+ get.assert_called_once_with(self.context, 123, 'host')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_associate')
+ def test_associate(self, associate):
+ associate.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
+ 'fake-uuid')
+ associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
+ network_id=None, reserved=False)
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_associate_pool')
+ def test_associate_pool(self, associate):
+ associate.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
+ 'fake-uuid', 'host')
+ associate.assert_called_with(self.context, 123,
+ instance_uuid='fake-uuid',
+ host='host')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_disassociate_by_address(self, disassociate):
+ fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
+ disassociate.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
+ def test_disassociate_all_by_timeout(self, disassociate):
+ now = timeutils.utcnow()
+ now_tz = timeutils.parse_isotime(
+ timeutils.isotime(now)).replace(
+ tzinfo=iso8601.iso8601.Utc())
+ disassociate.return_value = 123
+ result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
+ 'host', now)
+ self.assertEqual(123, result)
+ # NOTE(danms): be pedantic about timezone stuff
+ args, kwargs = disassociate.call_args_list[0]
+ self.assertEqual(now_tz, args[2])
+ self.assertEqual((self.context, 'host'), args[:2])
+ self.assertEqual({}, kwargs)
+
+ @mock.patch('nova.db.fixed_ip_create')
+ def test_create(self, create):
+ create.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP(address='1.2.3.4')
+ fixedip.create(self.context)
+ create.assert_called_once_with(
+ self.context, {'address': '1.2.3.4'})
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_save(self, update):
+ update.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
+ instance_uuid='fake-uuid')
+ self.assertRaises(exception.ObjectActionError, fixedip.save)
+ fixedip.obj_reset_changes(['address'])
+ fixedip.save()
+ update.assert_called_once_with(self.context, '1.2.3.4',
+ {'instance_uuid': 'fake-uuid'})
+
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_disassociate(self, disassociate):
+ fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
+ instance_uuid='fake-uuid')
+ fixedip.obj_reset_changes()
+ fixedip.disassociate()
+ disassociate.assert_called_once_with(self.context, '1.2.3.4')
+ self.assertIsNone(fixedip.instance_uuid)
+
+ @mock.patch('nova.db.fixed_ip_get_all')
+ def test_get_all(self, get_all):
+ get_all.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_all(self.context)
+ self.assertEqual(1, len(fixedips))
+ get_all.assert_called_once_with(self.context)
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_get_by_instance(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
+ 'fake-uuid')
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 'fake-uuid')
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_host')
+ def test_get_by_host(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 'host')
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_by_virtual_interface_id(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
+ self.context, 123)
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 123)
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ def test_floating_ips_do_not_lazy_load(self):
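+        # Accessing floating_ips on a fresh FixedIP must raise rather
+        # than silently lazy-loading from the database.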
+ fixedip = fixed_ip.FixedIP()
+ self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips)
+
+ @mock.patch('nova.db.fixed_ip_bulk_create')
+ def test_bulk_create(self, bulk):
+ fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
+ fixed_ip.FixedIP(address='192.168.1.2')]
+ fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
+ bulk.assert_called_once_with(self.context,
+ [{'address': '192.168.1.1'},
+ {'address': '192.168.1.2'}])
+
+ @mock.patch('nova.db.network_get_associated_fixed_ips')
+ def test_get_by_network(self, get):
+ info = {'address': '1.2.3.4',
+ 'instance_uuid': 'fake-uuid',
+ 'network_id': 0,
+ 'vif_id': 1,
+ 'vif_address': 'de:ad:be:ee:f0:00',
+ 'instance_hostname': 'fake-host',
+ 'instance_updated': datetime.datetime(1955, 11, 5),
+ 'instance_created': datetime.datetime(1955, 11, 5),
+ 'allocated': True,
+ 'leased': True,
+ 'default_route': True,
+ }
+ get.return_value = [info]
+ fixed_ips = fixed_ip.FixedIPList.get_by_network(
+ self.context, {'id': 0}, host='fake-host')
+ get.assert_called_once_with(self.context, 0, host='fake-host')
+ self.assertEqual(1, len(fixed_ips))
+ fip = fixed_ips[0]
+ self.assertEqual('1.2.3.4', str(fip.address))
+ self.assertEqual('fake-uuid', fip.instance_uuid)
+ self.assertEqual(0, fip.network_id)
+ self.assertEqual(1, fip.virtual_interface_id)
+ self.assertTrue(fip.allocated)
+ self.assertTrue(fip.leased)
+ self.assertEqual('fake-uuid', fip.instance.uuid)
+ self.assertEqual('fake-host', fip.instance.hostname)
+ self.assertIsInstance(fip.instance.created_at, datetime.datetime)
+ self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
+ self.assertEqual(1, fip.virtual_interface.id)
+ self.assertEqual(info['vif_address'], fip.virtual_interface.address)
+
+ @mock.patch('nova.db.network_get_associated_fixed_ips')
+ def test_backport_default_route(self, mock_get):
+ info = {'address': '1.2.3.4',
+ 'instance_uuid': 'fake-uuid',
+ 'network_id': 0,
+ 'vif_id': 1,
+ 'vif_address': 'de:ad:be:ee:f0:00',
+ 'instance_hostname': 'fake-host',
+ 'instance_updated': datetime.datetime(1955, 11, 5),
+ 'instance_created': datetime.datetime(1955, 11, 5),
+ 'allocated': True,
+ 'leased': True,
+ 'default_route': True,
+ }
+ mock_get.return_value = [info]
+ fixed_ips = fixed_ip.FixedIPList.get_by_network(
+ self.context, {'id': 0}, host='fake-host')
+ primitive = fixed_ips[0].obj_to_primitive()
+ self.assertIn('default_route', primitive['nova_object.data'])
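+        # Backporting the primitive to version 1.1 must strip the
+        # default_route field, which older FixedIP versions lack.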
+ fixed_ips[0].obj_make_compatible(primitive['nova_object.data'], '1.1')
+ self.assertNotIn('default_route', primitive['nova_object.data'])
+
+
+class TestFixedIPObject(test_objects._LocalTest,
+ _TestFixedIPObject):
+ pass
+
+
+class TestRemoteFixedIPObject(test_objects._RemoteTest,
+ _TestFixedIPObject):
+ pass
diff --git a/nova/tests/unit/objects/test_flavor.py b/nova/tests/unit/objects/test_flavor.py
new file mode 100644
index 0000000000..a7189d4caa
--- /dev/null
+++ b/nova/tests/unit/objects/test_flavor.py
@@ -0,0 +1,253 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova.objects import flavor as flavor_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_flavor = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'name': 'm1.foo',
+ 'memory_mb': 1024,
+ 'vcpus': 4,
+ 'root_gb': 20,
+ 'ephemeral_gb': 0,
+ 'flavorid': 'm1.foo',
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ 'disabled': False,
+ 'is_public': True,
+ 'extra_specs': {'foo': 'bar'},
+ }
+
+
+class _TestFlavor(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_id(self):
+ with mock.patch.object(db, 'flavor_get') as get:
+ get.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_id(self.context, 1)
+ self._compare(self, fake_flavor, flavor)
+
+ def test_get_by_name(self):
+ with mock.patch.object(db, 'flavor_get_by_name') as get_by_name:
+ get_by_name.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo')
+ self._compare(self, fake_flavor, flavor)
+
+ def test_get_by_flavor_id(self):
+ with mock.patch.object(db, 'flavor_get_by_flavor_id') as get_by_id:
+ get_by_id.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_flavor_id(self.context,
+ 'm1.foo')
+ self._compare(self, fake_flavor, flavor)
+
+ def test_add_access(self):
+ elevated = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
+ with mock.patch.object(db, 'flavor_access_add') as add:
+ flavor.add_access('456')
+ add.assert_called_once_with(elevated, '123', '456')
+
+ def test_add_access_with_dirty_projects(self):
+ flavor = flavor_obj.Flavor(context=self.context, projects=['1'])
+ self.assertRaises(exception.ObjectActionError,
+ flavor.add_access, '2')
+
+ def test_remove_access(self):
+ elevated = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
+ with mock.patch.object(db, 'flavor_access_remove') as remove:
+ flavor.remove_access('456')
+ remove.assert_called_once_with(elevated, '123', '456')
+
+ def test_create(self):
+ flavor = flavor_obj.Flavor()
+ flavor.name = 'm1.foo'
+ flavor.extra_specs = fake_flavor['extra_specs']
+
+ with mock.patch.object(db, 'flavor_create') as create:
+ create.return_value = fake_flavor
+ flavor.create(self.context)
+
+ self.assertEqual(self.context, flavor._context)
+ # NOTE(danms): Orphan this to avoid lazy-loads
+ flavor._context = None
+ self._compare(self, fake_flavor, flavor)
+
+ def test_create_with_projects(self):
+ context = self.context.elevated()
+ flavor = flavor_obj.Flavor()
+ flavor.name = 'm1.foo'
+ flavor.extra_specs = fake_flavor['extra_specs']
+ flavor.projects = ['project-1', 'project-2']
+
+ db_flavor = dict(fake_flavor, projects=list(flavor.projects))
+
+ with mock.patch.multiple(db, flavor_create=mock.DEFAULT,
+ flavor_access_get_by_flavor_id=mock.DEFAULT
+ ) as methods:
+ methods['flavor_create'].return_value = db_flavor
+ methods['flavor_access_get_by_flavor_id'].return_value = [
+ {'project_id': 'project-1'},
+ {'project_id': 'project-2'}]
+ flavor.create(context)
+ methods['flavor_create'].assert_called_once_with(
+ context,
+ {'name': 'm1.foo',
+ 'extra_specs': fake_flavor['extra_specs']},
+ projects=['project-1', 'project-2'])
+
+ self.assertEqual(context, flavor._context)
+ # NOTE(danms): Orphan this to avoid lazy-loads
+ flavor._context = None
+ self._compare(self, fake_flavor, flavor)
+ self.assertEqual(['project-1', 'project-2'], flavor.projects)
+
+ def test_create_with_id(self):
+ flavor = flavor_obj.Flavor(id=123)
+ self.assertRaises(exception.ObjectActionError, flavor.create,
+ self.context)
+
+ @mock.patch('nova.db.flavor_access_add')
+ @mock.patch('nova.db.flavor_access_remove')
+ @mock.patch('nova.db.flavor_extra_specs_delete')
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_save(self, mock_update, mock_delete, mock_remove, mock_add):
+ ctxt = self.context.elevated()
+ extra_specs = {'key1': 'value1', 'key2': 'value2'}
+ projects = ['project-1', 'project-2']
+ flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo',
+ extra_specs=extra_specs, projects=projects)
+ flavor.obj_reset_changes()
+
+ # Test deleting an extra_specs key and project
+ del flavor.extra_specs['key1']
+ del flavor.projects[-1]
+ self.assertEqual(set(['extra_specs', 'projects']),
+ flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'value2'}, flavor.extra_specs)
+ mock_delete.assert_called_once_with(ctxt, 'foo', 'key1')
+ self.assertEqual(['project-1'], flavor.projects)
+ mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2')
+
+ # Test updating an extra_specs key value
+ flavor.extra_specs['key2'] = 'foobar'
+ self.assertEqual(set(['extra_specs']), flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'foobar'}, flavor.extra_specs)
+ mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'})
+
+ # Test adding an extra_specs and project
+ flavor.extra_specs['key3'] = 'value3'
+ flavor.projects.append('project-3')
+ self.assertEqual(set(['extra_specs', 'projects']),
+ flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'foobar', 'key3': 'value3'},
+ flavor.extra_specs)
+ mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar',
+ 'key3': 'value3'})
+ self.assertEqual(['project-1', 'project-3'], flavor.projects)
+ mock_add.assert_called_once_with(ctxt, 'foo', 'project-3')
+
+ @mock.patch('nova.db.flavor_create')
+ @mock.patch('nova.db.flavor_extra_specs_delete')
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_save_deleted_extra_specs(self, mock_update, mock_delete,
+ mock_create):
+ mock_create.return_value = dict(fake_flavor,
+ extra_specs={'key1': 'value1'})
+ ctxt = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=ctxt)
+ flavor.flavorid = 'test'
+ flavor.extra_specs = {'key1': 'value1'}
+ flavor.create()
+ flavor.extra_specs = {}
+ flavor.save()
+ mock_delete.assert_called_once_with(ctxt, flavor.flavorid,
+ 'key1')
+ self.assertFalse(mock_update.called)
+
+ def test_save_invalid_fields(self):
+ flavor = flavor_obj.Flavor(id=123)
+ self.assertRaises(exception.ObjectActionError, flavor.save)
+
+ def test_destroy(self):
+ flavor = flavor_obj.Flavor(id=123, name='foo')
+ with mock.patch.object(db, 'flavor_destroy') as destroy:
+ flavor.destroy(self.context)
+ destroy.assert_called_once_with(self.context, flavor.name)
+
+ def test_load_projects(self):
+ flavor = flavor_obj.Flavor(context=self.context, flavorid='foo')
+ with mock.patch.object(db, 'flavor_access_get_by_flavor_id') as get:
+ get.return_value = [{'project_id': 'project-1'}]
+ projects = flavor.projects
+
+ self.assertEqual(['project-1'], projects)
+ self.assertNotIn('projects', flavor.obj_what_changed())
+
+ def test_load_anything_else(self):
+ flavor = flavor_obj.Flavor()
+ self.assertRaises(exception.ObjectActionError,
+ getattr, flavor, 'name')
+
+
+class TestFlavor(test_objects._LocalTest, _TestFlavor):
+ pass
+
+
+class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor):
+ pass
+
+
+class _TestFlavorList(object):
+ def test_get_all(self):
+ with mock.patch.object(db, 'flavor_get_all') as get_all:
+ get_all.return_value = [fake_flavor]
+ filters = {'min_memory_mb': 4096}
+ flavors = flavor_obj.FlavorList.get_all(self.context,
+ inactive=False,
+ filters=filters,
+ sort_key='id',
+ sort_dir='asc')
+ self.assertEqual(1, len(flavors))
+ _TestFlavor._compare(self, fake_flavor, flavors[0])
+ get_all.assert_called_once_with(self.context, inactive=False,
+ filters=filters, sort_key='id',
+ sort_dir='asc', limit=None,
+ marker=None)
+
+
+class TestFlavorList(test_objects._LocalTest, _TestFlavorList):
+ pass
+
+
+class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList):
+ pass
diff --git a/nova/tests/unit/objects/test_floating_ip.py b/nova/tests/unit/objects/test_floating_ip.py
new file mode 100644
index 0000000000..8454505bb0
--- /dev/null
+++ b/nova/tests/unit/objects/test_floating_ip.py
@@ -0,0 +1,259 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import netaddr
+
+from nova import exception
+from nova import objects
+from nova.objects import floating_ip
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_objects
+
+fake_floating_ip = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'address': '172.17.0.1',
+ 'fixed_ip_id': None,
+ 'project_id': None,
+ 'host': None,
+ 'auto_assigned': False,
+ 'pool': None,
+ 'interface': None,
+ 'fixed_ip': None,
+}
+
+
+class _TestFloatingIPObject(object):
+ def _compare(self, obj, db_obj):
+ for field in obj.fields:
+ if field in floating_ip.FLOATING_IP_OPTIONAL_ATTRS:
+ if obj.obj_attr_is_set(field):
+ obj_val = obj[field].id
+ db_val = db_obj[field]['id']
+ else:
+ continue
+ else:
+ obj_val = obj[field]
+ db_val = db_obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.floating_ip_get')
+ def test_get_by_id(self, get):
+ db_floatingip = dict(fake_floating_ip,
+ fixed_ip=test_fixed_ip.fake_fixed_ip)
+ get.return_value = db_floatingip
+ floatingip = floating_ip.FloatingIP.get_by_id(self.context, 123)
+ get.assert_called_once_with(self.context, 123)
+ self._compare(floatingip, db_floatingip)
+
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_get_by_address(self, get):
+ get.return_value = fake_floating_ip
+ floatingip = floating_ip.FloatingIP.get_by_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self._compare(floatingip, fake_floating_ip)
+
+ @mock.patch('nova.db.floating_ip_get_pools')
+ def test_get_pool_names(self, get):
+ get.return_value = [{'name': 'a'}, {'name': 'b'}]
+ self.assertEqual(['a', 'b'],
+ floating_ip.FloatingIP.get_pool_names(self.context))
+
+ @mock.patch('nova.db.floating_ip_allocate_address')
+ def test_allocate_address(self, allocate):
+ allocate.return_value = '1.2.3.4'
+ self.assertEqual('1.2.3.4',
+ floating_ip.FloatingIP.allocate_address(self.context,
+ 'project',
+ 'pool'))
+ allocate.assert_called_with(self.context, 'project', 'pool',
+ auto_assigned=False)
+
+ @mock.patch('nova.db.floating_ip_fixed_ip_associate')
+ def test_associate(self, associate):
+ db_fixed = dict(test_fixed_ip.fake_fixed_ip,
+ network=test_network.fake_network)
+ associate.return_value = db_fixed
+ floatingip = floating_ip.FloatingIP.associate(self.context,
+ '172.17.0.1',
+ '192.168.1.1',
+ 'host')
+ associate.assert_called_with(self.context, '172.17.0.1',
+ '192.168.1.1', 'host')
+ self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
+ self.assertEqual('172.17.0.1', str(floatingip.address))
+ self.assertEqual('host', floatingip.host)
+
+ @mock.patch('nova.db.floating_ip_deallocate')
+ def test_deallocate(self, deallocate):
+ floating_ip.FloatingIP.deallocate(self.context, '1.2.3.4')
+ deallocate.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_destroy')
+ def test_destroy(self, destroy):
+ floating_ip.FloatingIP.destroy(self.context, '1.2.3.4')
+ destroy.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_disassociate')
+ def test_disassociate(self, disassociate):
+ db_fixed = dict(test_fixed_ip.fake_fixed_ip,
+ network=test_network.fake_network)
+ disassociate.return_value = db_fixed
+ floatingip = floating_ip.FloatingIP.disassociate(self.context,
+ '1.2.3.4')
+ disassociate.assert_called_with(self.context, '1.2.3.4')
+ self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
+ self.assertEqual('1.2.3.4', str(floatingip.address))
+
+ @mock.patch('nova.db.floating_ip_update')
+ def test_save(self, update):
+ update.return_value = fake_floating_ip
+ floatingip = floating_ip.FloatingIP(context=self.context,
+ id=123, address='1.2.3.4',
+ host='foo')
+ floatingip.obj_reset_changes(['address', 'id'])
+ floatingip.save()
+ self.assertEqual(set(), floatingip.obj_what_changed())
+ update.assert_called_with(self.context, '1.2.3.4',
+ {'host': 'foo'})
+
+ def test_save_errors(self):
+ floatingip = floating_ip.FloatingIP(context=self.context,
+ id=123, host='foo')
+ floatingip.obj_reset_changes()
+        floatingip.address = '1.2.3.4'
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+ floatingip.obj_reset_changes()
+ floatingip.fixed_ip_id = 1
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+    @mock.patch('nova.db.floating_ip_update')
+    def test_save_no_fixedip(self, update):
+        update.return_value = fake_floating_ip
+        floatingip = floating_ip.FloatingIP(context=self.context,
+                                            id=123, address='1.2.3.4')
+        floatingip.obj_reset_changes(['address', 'id'])
+        floatingip.fixed_ip = objects.FixedIP(context=self.context,
+                                              id=456)
+        floatingip.save()
+        # The fixed_ip relationship must not be passed through to the
+        # DB layer on save.
+        self.assertNotIn('fixed_ip', update.call_args[0][2])
+
+ @mock.patch('nova.db.floating_ip_get_all')
+ def test_get_all(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_all(self.context)
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context)
+
+ @mock.patch('nova.db.floating_ip_get_all_by_host')
+ def test_get_by_host(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_host(self.context,
+ 'host')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 'host')
+
+ @mock.patch('nova.db.floating_ip_get_all_by_project')
+ def test_get_by_project(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_project(self.context,
+ 'project')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 'project')
+
+ @mock.patch('nova.db.floating_ip_get_by_fixed_address')
+ def test_get_by_fixed_address(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_fixed_address(
+ self.context, '1.2.3.4')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_get_by_fixed_ip_id')
+ def test_get_by_fixed_ip_id(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_fixed_ip_id(
+ self.context, 123)
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 123)
+
+ @mock.patch('nova.db.instance_floating_address_get_all')
+ def test_get_addresses_by_instance(self, get_all):
+ expected = ['1.2.3.4', '4.5.6.7']
+ get_all.return_value = list(expected)
+ ips = floating_ip.FloatingIP.get_addresses_by_instance(
+ self.context, {'uuid': '1234'})
+ self.assertEqual(expected, ips)
+ get_all.assert_called_once_with(self.context, '1234')
+
+ def test_make_ip_info(self):
+ result = objects.FloatingIPList.make_ip_info('1.2.3.4', 'pool', 'eth0')
+ self.assertEqual({'address': '1.2.3.4', 'pool': 'pool',
+ 'interface': 'eth0'},
+ result)
+
+ @mock.patch('nova.db.floating_ip_bulk_create')
+ def test_bulk_create(self, create_mock):
+ def fake_create(ctxt, ip_info):
+ return [{'id': 1, 'address': ip['address'], 'fixed_ip_id': 1,
+ 'project_id': 'foo', 'host': 'host',
+ 'auto_assigned': False, 'pool': ip['pool'],
+ 'interface': ip['interface'], 'fixed_ip': None,
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False}
+ for ip in ip_info]
+
+ create_mock.side_effect = fake_create
+ ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0'),
+ objects.FloatingIPList.make_ip_info('1.1.1.2', 'loop', 'eth1')]
+ result = objects.FloatingIPList.create(None, ips)
+        self.assertIsNone(result)
+ result = objects.FloatingIPList.create(None, ips, want_result=True)
+ self.assertEqual('1.1.1.2', str(result[1].address))
+
+ @mock.patch('nova.db.floating_ip_bulk_destroy')
+ def test_bulk_destroy(self, destroy_mock):
+ ips = [{'address': '1.2.3.4'}, {'address': '4.5.6.7'}]
+ objects.FloatingIPList.destroy(None, ips)
+ destroy_mock.assert_called_once_with(None, ips)
+
+ def test_backport_fixedip_1_1(self):
+ floating = objects.FloatingIP()
+ fixed = objects.FixedIP()
+ floating.fixed_ip = fixed
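+        # Backporting the parent FloatingIP must also backport the
+        # embedded FixedIP primitive to version 1.1.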
+ primitive = floating.obj_to_primitive(target_version='1.1')
+ self.assertEqual('1.1',
+ primitive['nova_object.data']['fixed_ip']['nova_object.version'])
+
+
+class TestFloatingIPObject(test_objects._LocalTest,
+ _TestFloatingIPObject):
+ pass
+
+
+class TestRemoteFloatingIPObject(test_objects._RemoteTest,
+ _TestFloatingIPObject):
+ pass
diff --git a/nova/tests/unit/objects/test_hv_spec.py b/nova/tests/unit/objects/test_hv_spec.py
new file mode 100644
index 0000000000..94782cd3a1
--- /dev/null
+++ b/nova/tests/unit/objects/test_hv_spec.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import arch
+from nova.compute import hvtype
+from nova.compute import vm_mode
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+spec_dict = {
+ 'arch': arch.I686,
+ 'hv_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM
+}
+
+spec_list = [
+ arch.I686,
+ hvtype.KVM,
+ vm_mode.HVM
+]
+
+
+class _TestHVSpecObject(object):
+
+ def test_hv_spec_from_list(self):
+ spec_obj = objects.HVSpec.from_list(spec_list)
+ self.compare_obj(spec_obj, spec_dict)
+
+ def test_hv_spec_to_list(self):
+ spec_obj = objects.HVSpec()
+ spec_obj.arch = arch.I686
+ spec_obj.hv_type = hvtype.KVM
+ spec_obj.vm_mode = vm_mode.HVM
+ spec = spec_obj.to_list()
+ self.assertEqual(spec_list, spec)
+
+
+class TestHVSpecObject(test_objects._LocalTest,
+ _TestHVSpecObject):
+ pass
+
+
+class TestRemoteHVSpecObject(test_objects._RemoteTest,
+ _TestHVSpecObject):
+ pass
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
new file mode 100644
index 0000000000..b24fd0143d
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance.py
@@ -0,0 +1,1196 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+import mox
+import netaddr
+from oslo.utils import timeutils
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import notifications
+from nova import objects
+from nova.objects import instance
+from nova.objects import instance_info_cache
+from nova.objects import instance_numa_topology
+from nova.objects import pci_device
+from nova.objects import security_group
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_instance_numa_topology
+from nova.tests.unit.objects import test_instance_pci_requests
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_security_group
+from nova import utils
+
+
+class _TestInstanceObject(object):
+ @property
+ def fake_instance(self):
+ fake_instance = fakes.stub_instance(id=2,
+ access_ipv4='1.2.3.4',
+ access_ipv6='::1')
+ fake_instance['cell_name'] = 'api!child'
+ fake_instance['scheduled_at'] = None
+ fake_instance['terminated_at'] = None
+ fake_instance['deleted_at'] = None
+ fake_instance['created_at'] = None
+ fake_instance['updated_at'] = None
+ fake_instance['launched_at'] = (
+ fake_instance['launched_at'].replace(
+ tzinfo=iso8601.iso8601.Utc(), microsecond=0))
+ fake_instance['deleted'] = False
+ fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
+ fake_instance['security_groups'] = []
+ fake_instance['pci_devices'] = []
+ fake_instance['user_id'] = self.context.user_id
+ fake_instance['project_id'] = self.context.project_id
+ return fake_instance
+
+ def test_datetime_deserialization(self):
+ red_letter_date = timeutils.parse_isotime(
+ timeutils.isotime(datetime.datetime(1955, 11, 5)))
+        inst = instance.Instance(uuid='fake-uuid',
+                                 launched_at=red_letter_date)
+ primitive = inst.obj_to_primitive()
+ expected = {'nova_object.name': 'Instance',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.16',
+ 'nova_object.data':
+ {'uuid': 'fake-uuid',
+ 'launched_at': '1955-11-05T00:00:00Z'},
+ 'nova_object.changes': ['launched_at', 'uuid']}
+ self.assertEqual(primitive, expected)
+ inst2 = instance.Instance.obj_from_primitive(primitive)
+ self.assertIsInstance(inst2.launched_at, datetime.datetime)
+ self.assertEqual(inst2.launched_at, red_letter_date)
+
+ def test_ip_deserialization(self):
+ inst = instance.Instance(uuid='fake-uuid', access_ip_v4='1.2.3.4',
+ access_ip_v6='::1')
+ primitive = inst.obj_to_primitive()
+ expected = {'nova_object.name': 'Instance',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.16',
+ 'nova_object.data':
+ {'uuid': 'fake-uuid',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '::1'},
+ 'nova_object.changes': ['uuid', 'access_ip_v6',
+ 'access_ip_v4']}
+ self.assertEqual(primitive, expected)
+ inst2 = instance.Instance.obj_from_primitive(primitive)
+ self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
+ self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
+ self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
+ self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
+
+ def test_get_without_expected(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, 'uuid',
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, 'uuid',
+ expected_attrs=[])
+ for attr in instance.INSTANCE_OPTIONAL_ATTRS:
+ self.assertFalse(inst.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ def test_get_with_expected(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ self.mox.StubOutWithMock(
+ db, 'instance_extra_get_by_instance_uuid')
+
+ exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
+ exp_cols.remove('fault')
+ exp_cols.remove('numa_topology')
+ exp_cols.remove('pci_requests')
+
+ db.instance_get_by_uuid(
+ self.context, 'uuid',
+ columns_to_join=exp_cols,
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ fake_faults = test_instance_fault.fake_faults
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [self.fake_instance['uuid']]
+ ).AndReturn(fake_faults)
+ fake_topology = test_instance_numa_topology.fake_db_topology
+ db.instance_extra_get_by_instance_uuid(
+ self.context, self.fake_instance['uuid'],
+ columns=['numa_topology']
+ ).AndReturn(fake_topology)
+ fake_requests = test_instance_pci_requests.fake_pci_requests
+ db.instance_extra_get_by_instance_uuid(
+ self.context, self.fake_instance['uuid'],
+ columns=['pci_requests']
+ ).AndReturn(fake_requests)
+
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(
+ self.context, 'uuid',
+ expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
+ for attr in instance.INSTANCE_OPTIONAL_ATTRS:
+ self.assertTrue(inst.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'instance_get')
+ db.instance_get(self.context, 'instid',
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_id(self.context, 'instid')
+ self.assertEqual(inst.uuid, self.fake_instance['uuid'])
+ self.assertRemotes()
+
+ def test_load(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_uuid = self.fake_instance['uuid']
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ fake_inst2 = dict(self.fake_instance,
+ system_metadata=[{'key': 'foo', 'value': 'bar'}])
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['system_metadata'],
+ use_slave=False
+ ).AndReturn(fake_inst2)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertFalse(hasattr(inst, '_system_metadata'))
+ sys_meta = inst.system_metadata
+ self.assertEqual(sys_meta, {'foo': 'bar'})
+ self.assertTrue(hasattr(inst, '_system_metadata'))
+ # Make sure we don't run load again
+ sys_meta2 = inst.system_metadata
+ self.assertEqual(sys_meta2, {'foo': 'bar'})
+ self.assertRemotes()
+
+ def test_load_invalid(self):
+ inst = instance.Instance(context=self.context, uuid='fake-uuid')
+ self.assertRaises(exception.ObjectActionError,
+ inst.obj_load_attr, 'foo')
+
+ def test_get_remote(self):
+ # isotime doesn't have microseconds and is always UTC
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_instance = self.fake_instance
+ db.instance_get_by_uuid(self.context, 'fake-uuid',
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
+ self.assertEqual(inst.id, fake_instance['id'])
+ self.assertEqual(inst.launched_at, fake_instance['launched_at'])
+ self.assertEqual(str(inst.access_ip_v4),
+ fake_instance['access_ip_v4'])
+ self.assertEqual(str(inst.access_ip_v6),
+ fake_instance['access_ip_v6'])
+ self.assertRemotes()
+
+ def test_refresh(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_uuid = self.fake_instance['uuid']
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(dict(self.fake_instance,
+ host='orig-host'))
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(dict(self.fake_instance,
+ host='new-host'))
+ self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
+ 'refresh')
+ instance_info_cache.InstanceInfoCache.refresh()
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(inst.host, 'orig-host')
+ inst.refresh()
+ self.assertEqual(inst.host, 'new-host')
+ self.assertRemotes()
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ def test_refresh_does_not_recurse(self):
+ inst = instance.Instance(context=self.context, uuid='fake-uuid',
+ metadata={})
+ inst_copy = instance.Instance()
+ inst_copy.uuid = inst.uuid
+ self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
+ instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
+ expected_attrs=['metadata'],
+ use_slave=False
+ ).AndReturn(inst_copy)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.OrphanedObjectError, inst.refresh)
+
+ def _save_test_helper(self, cell_type, save_kwargs):
+ """Common code for testing save() for cells/non-cells."""
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+
+ old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
+ vm_state='old', task_state='old')
+ fake_uuid = old_ref['uuid']
+
+ expected_updates = dict(vm_state='meow', task_state='wuff',
+ user_data='new')
+
+ new_ref = dict(old_ref, host='newhost', **expected_updates)
+ exp_vm_state = save_kwargs.get('expected_vm_state')
+ exp_task_state = save_kwargs.get('expected_task_state')
+ admin_reset = save_kwargs.get('admin_state_reset', False)
+ if exp_vm_state:
+ expected_updates['expected_vm_state'] = exp_vm_state
+ if exp_task_state:
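+            # Instance 1.9 (Havana-era) callers pass only 'image_snapshot';
+            # the expectation is widened so save() also accepts
+            # 'image_snapshot_pending'.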
+ if (exp_task_state == 'image_snapshot' and
+ 'instance_version' in save_kwargs and
+ save_kwargs['instance_version'] == '1.9'):
+ expected_updates['expected_task_state'] = [
+ 'image_snapshot', 'image_snapshot_pending']
+ else:
+ expected_updates['expected_task_state'] = exp_task_state
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
+ self.mox.StubOutWithMock(cells_api_mock,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(cells_api_mock,
+ 'instance_update_from_api')
+ self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(old_ref)
+ db.instance_update_and_get_original(
+ self.context, fake_uuid, expected_updates,
+ update_cells=False,
+ columns_to_join=['info_cache', 'security_groups',
+ 'system_metadata']
+ ).AndReturn((old_ref, new_ref))
+ if cell_type == 'api':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
+ cells_api_mock.instance_update_from_api(
+ self.context, mox.IsA(instance.Instance),
+ exp_vm_state, exp_task_state, admin_reset)
+ elif cell_type == 'compute':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
+ cells_api_mock.instance_update_at_top(self.context, new_ref)
+ notifications.send_update(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
+ if 'instance_version' in save_kwargs:
+ inst.VERSION = save_kwargs.pop('instance_version')
+ self.assertEqual('old', inst.task_state)
+ self.assertEqual('old', inst.vm_state)
+ self.assertEqual('old', inst.user_data)
+ inst.vm_state = 'meow'
+ inst.task_state = 'wuff'
+ inst.user_data = 'new'
+ inst.save(**save_kwargs)
+ self.assertEqual('newhost', inst.host)
+ self.assertEqual('meow', inst.vm_state)
+ self.assertEqual('wuff', inst.task_state)
+ self.assertEqual('new', inst.user_data)
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ def test_save(self):
+ self._save_test_helper(None, {})
+
+ def test_save_in_api_cell(self):
+ self._save_test_helper('api', {})
+
+ def test_save_in_compute_cell(self):
+ self._save_test_helper('compute', {})
+
+ def test_save_exp_vm_state(self):
+ self._save_test_helper(None, {'expected_vm_state': ['meow']})
+
+ def test_save_exp_task_state(self):
+ self._save_test_helper(None, {'expected_task_state': ['meow']})
+
+ def test_save_exp_task_state_havana(self):
+ self._save_test_helper(None, {
+ 'expected_task_state': 'image_snapshot',
+ 'instance_version': '1.9'})
+
+ def test_save_exp_vm_state_api_cell(self):
+ self._save_test_helper('api', {'expected_vm_state': ['meow']})
+
+ def test_save_exp_task_state_api_cell(self):
+ self._save_test_helper('api', {'expected_task_state': ['meow']})
+
+ def test_save_exp_task_state_api_cell_admin_reset(self):
+ self._save_test_helper('api', {'admin_state_reset': True})
+
+ def test_save_rename_sends_notification(self):
+ # Tests that simply changing the 'display_name' on the instance
+ # will send a notification.
+ self.flags(enable=False, group='cells')
+ old_ref = dict(self.fake_instance, display_name='hello')
+ fake_uuid = old_ref['uuid']
+ expected_updates = dict(display_name='goodbye')
+ new_ref = dict(old_ref, **expected_updates)
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(old_ref)
+ db.instance_update_and_get_original(
+ self.context, fake_uuid, expected_updates, update_cells=False,
+ columns_to_join=['info_cache', 'security_groups',
+ 'system_metadata']
+ ).AndReturn((old_ref, new_ref))
+ notifications.send_update(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
+ use_slave=False)
+ self.assertEqual('hello', inst.display_name)
+ inst.display_name = 'goodbye'
+ inst.save()
+ self.assertEqual('goodbye', inst.display_name)
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ @mock.patch('nova.db.instance_update_and_get_original')
+ @mock.patch('nova.objects.Instance._from_db_object')
+ def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
+ # NOTE(danms): This tests that we don't update the pci_devices
+ # field from the contents of the database. This is not because we
+ # don't necessarily want to, but because the way pci_devices is
+ # currently implemented it causes versioning issues. When that is
+ # resolved, this test should go away.
+ mock_update.return_value = None, None
+ inst = instance.Instance(context=self.context, id=123)
+ inst.uuid = 'foo'
+ inst.pci_devices = pci_device.PciDeviceList()
+ inst.save()
+ self.assertNotIn('pci_devices',
+ mock_fdo.call_args_list[0][1]['expected_attrs'])
+
+ def test_get_deleted(self):
+ fake_inst = dict(self.fake_instance, id=123, deleted=123)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(danms): Make sure it's actually a bool
+ self.assertEqual(inst.deleted, True)
+
+ def test_get_not_cleaned(self):
+ fake_inst = dict(self.fake_instance, id=123, cleaned=None)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(mikal): Make sure it's actually a bool
+ self.assertEqual(inst.cleaned, False)
+
+ def test_get_cleaned(self):
+ fake_inst = dict(self.fake_instance, id=123, cleaned=1)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(mikal): Make sure it's actually a bool
+ self.assertEqual(inst.cleaned, True)
+
+ def test_with_info_cache(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
+ nwinfo1_json = nwinfo1.json()
+ nwinfo2_json = nwinfo2.json()
+ fake_inst['info_cache'] = dict(
+ test_instance_info_cache.fake_info_cache,
+ network_info=nwinfo1_json,
+ instance_uuid=fake_uuid)
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ db.instance_info_cache_update(self.context, fake_uuid,
+ {'network_info': nwinfo2_json})
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(inst.info_cache.network_info, nwinfo1)
+ self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
+ inst.info_cache.network_info = nwinfo2
+ inst.save()
+
+ def test_with_info_cache_none(self):
+ fake_inst = dict(self.fake_instance, info_cache=None)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['info_cache'])
+ self.assertIsNone(inst.info_cache)
+
+ def test_with_security_groups(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_inst['security_groups'] = [
+ {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
+ 'user_id': 'fake-user', 'project_id': 'fake_project',
+ 'created_at': None, 'updated_at': None, 'deleted_at': None,
+ 'deleted': False},
+ {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
+ 'user_id': 'fake-user', 'project_id': 'fake_project',
+ 'created_at': None, 'updated_at': None, 'deleted_at': None,
+ 'deleted': False},
+ ]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'security_group_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ db.security_group_update(self.context, 1, {'description': 'changed'}
+ ).AndReturn(fake_inst['security_groups'][0])
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(len(inst.security_groups), 2)
+ for index, group in enumerate(fake_inst['security_groups']):
+ for key in group:
+ self.assertEqual(group[key],
+ inst.security_groups[index][key])
+ self.assertIsInstance(inst.security_groups[index],
+ security_group.SecurityGroup)
+ self.assertEqual(inst.security_groups.obj_what_changed(), set())
+ inst.security_groups[0].description = 'changed'
+ inst.save()
+ self.assertEqual(inst.security_groups.obj_what_changed(), set())
+
+ def test_with_empty_security_groups(self):
+ fake_inst = dict(self.fake_instance, security_groups=[])
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(0, len(inst.security_groups))
+
+ def test_with_empty_pci_devices(self):
+ fake_inst = dict(self.fake_instance, pci_devices=[])
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['pci_devices'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['pci_devices'])
+ self.assertEqual(len(inst.pci_devices), 0)
+
+ def test_with_pci_devices(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_inst['pci_devices'] = [
+ {'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 2,
+ 'compute_node_id': 1,
+ 'address': 'a1',
+ 'vendor_id': 'v1',
+ 'product_id': 'p1',
+ 'dev_type': 't',
+ 'status': 'allocated',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': fake_uuid,
+ 'request_id': None,
+ 'extra_info': '{}'},
+ {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'allocated',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': fake_uuid,
+ 'request_id': None,
+ 'extra_info': '{}'},
+ ]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['pci_devices'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['pci_devices'])
+ self.assertEqual(len(inst.pci_devices), 2)
+ self.assertEqual(inst.pci_devices[0].instance_uuid, fake_uuid)
+ self.assertEqual(inst.pci_devices[1].instance_uuid, fake_uuid)
+
+ def test_with_fault(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_faults = [dict(x, instance_uuid=fake_uuid)
+ for x in test_instance_fault.fake_faults['fake-uuid']]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ expected_attrs=['fault'])
+ self.assertEqual(fake_faults[0], dict(inst.fault.items()))
+ self.assertRemotes()
+
+ def test_iteritems_with_extra_attrs(self):
+ self.stubs.Set(instance.Instance, 'name', 'foo')
+ inst = instance.Instance(uuid='fake-uuid')
+ self.assertEqual(inst.items(),
+ {'uuid': 'fake-uuid',
+ 'name': 'foo',
+ }.items())
+
+ def _test_metadata_change_tracking(self, which):
+ inst = instance.Instance(uuid='fake-uuid')
+ setattr(inst, which, {})
+ inst.obj_reset_changes()
+ getattr(inst, which)['foo'] = 'bar'
+ self.assertEqual(set([which]), inst.obj_what_changed())
+ inst.obj_reset_changes()
+ self.assertEqual(set(), inst.obj_what_changed())
+
+ def test_metadata_change_tracking(self):
+ self._test_metadata_change_tracking('metadata')
+
+ def test_system_metadata_change_tracking(self):
+ self._test_metadata_change_tracking('system_metadata')
+
+ def test_create_stubbed(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ vals = {'host': 'foo-host',
+ 'memory_mb': 128,
+ 'system_metadata': {'foo': 'bar'}}
+ fake_inst = fake_instance.fake_db_instance(**vals)
+ db.instance_create(self.context, vals).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance(host='foo-host', memory_mb=128,
+ system_metadata={'foo': 'bar'})
+ inst.create(self.context)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ db.instance_create(self.context, {}).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance()
+ inst.create(self.context)
+ self.assertEqual(self.fake_instance['id'], inst.id)
+
+ def test_create_with_values(self):
+ inst1 = instance.Instance(user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ host='foo-host')
+ inst1.create(self.context)
+ self.assertEqual(inst1.host, 'foo-host')
+ inst2 = instance.Instance.get_by_uuid(self.context, inst1.uuid)
+ self.assertEqual(inst2.host, 'foo-host')
+
+ def test_create_with_numa_topology(self):
+ inst = instance.Instance(uuid=self.fake_instance['uuid'],
+ numa_topology=instance_numa_topology.InstanceNUMATopology
+ .obj_from_topology(
+ test_instance_numa_topology.fake_numa_topology))
+
+ inst.create(self.context)
+ self.assertIsNotNone(inst.numa_topology)
+ got_numa_topo = (
+ instance_numa_topology.InstanceNUMATopology
+ .get_by_instance_uuid(self.context, inst.uuid))
+ self.assertEqual(inst.numa_topology.id, got_numa_topo.id)
+
+ def test_recreate_fails(self):
+ inst = instance.Instance(user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ host='foo-host')
+ inst.create(self.context)
+ self.assertRaises(exception.ObjectActionError, inst.create,
+ self.context)
+
+ def test_create_with_special_things(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ fake_inst = fake_instance.fake_db_instance()
+ db.instance_create(self.context,
+ {'host': 'foo-host',
+ 'security_groups': ['foo', 'bar'],
+ 'info_cache': {'network_info': '[]'},
+ }
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ secgroups = security_group.SecurityGroupList()
+ secgroups.objects = []
+ for name in ('foo', 'bar'):
+ secgroup = security_group.SecurityGroup()
+ secgroup.name = name
+ secgroups.objects.append(secgroup)
+ info_cache = instance_info_cache.InstanceInfoCache()
+ info_cache.network_info = network_model.NetworkInfo()
+ inst = instance.Instance(host='foo-host', security_groups=secgroups,
+ info_cache=info_cache)
+ inst.create(self.context)
+
+ def test_destroy_stubbed(self):
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ deleted_at = datetime.datetime(1955, 11, 6)
+ fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
+ deleted=True)
+ db.instance_destroy(self.context, 'fake-uuid',
+ constraint=None).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance(id=1, uuid='fake-uuid', host='foo')
+ inst.destroy(self.context)
+ self.assertEqual(timeutils.normalize_time(inst.deleted_at),
+ timeutils.normalize_time(deleted_at))
+ self.assertTrue(inst.deleted)
+
+ def test_destroy(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance(id=db_inst['id'], uuid=db_inst['uuid'])
+ inst.destroy(self.context)
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_get_by_uuid, self.context,
+ db_inst['uuid'])
+
+ def test_destroy_host_constraint(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'host': 'foo'}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ inst.host = None
+ self.assertRaises(exception.ObjectActionError,
+ inst.destroy)
+
+ def test_name_does_not_trigger_lazy_loads(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'host': 'foo'}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ self.assertFalse(inst.obj_attr_is_set('fault'))
+ self.flags(instance_name_template='foo-%(uuid)s')
+ self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
+ self.assertFalse(inst.obj_attr_is_set('fault'))
+
+ def test_from_db_object_not_overwrite_info_cache(self):
+ info_cache = instance_info_cache.InstanceInfoCache()
+ inst = instance.Instance(context=self.context,
+ info_cache=info_cache)
+ db_inst = fake_instance.fake_db_instance()
+ db_inst['info_cache'] = dict(
+ test_instance_info_cache.fake_info_cache)
+ inst._from_db_object(self.context, inst, db_inst,
+ expected_attrs=['info_cache'])
+ self.assertIs(info_cache, inst.info_cache)
+
+ def test_compat_strings(self):
+ unicode_attributes = ['user_id', 'project_id', 'image_ref',
+ 'kernel_id', 'ramdisk_id', 'hostname',
+ 'key_name', 'key_data', 'host', 'node',
+ 'user_data', 'availability_zone',
+ 'display_name', 'display_description',
+ 'launched_on', 'locked_by', 'os_type',
+ 'architecture', 'vm_mode', 'root_device_name',
+ 'default_ephemeral_device',
+ 'default_swap_device', 'config_drive',
+ 'cell_name']
+ inst = instance.Instance()
+ expected = {}
+ for key in unicode_attributes:
+ inst[key] = u'\u2603'
+ expected[key] = '?'
+ primitive = inst.obj_to_primitive(target_version='1.6')
+ self.assertEqual(expected, primitive['nova_object.data'])
+ self.assertEqual('1.6', primitive['nova_object.version'])
+
+ def test_compat_pci_devices(self):
+ inst = instance.Instance()
+ inst.pci_devices = pci_device.PciDeviceList()
+ primitive = inst.obj_to_primitive(target_version='1.5')
+ self.assertNotIn('pci_devices', primitive)
+
+ def test_compat_info_cache(self):
+ inst = instance.Instance()
+ inst.info_cache = instance_info_cache.InstanceInfoCache()
+ primitive = inst.obj_to_primitive(target_version='1.9')
+ self.assertEqual(
+ '1.4',
+ primitive['nova_object.data']['info_cache']['nova_object.version'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_with_pci_requests(self, mock_get):
+ mock_get.return_value = objects.InstancePCIRequests()
+ db_instance = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id})
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['pci_requests'])
+ self.assertTrue(instance.obj_attr_is_set('pci_requests'))
+ self.assertIsNotNone(instance.pci_requests)
+
+ def _test_get_flavor(self, namespace):
+ prefix = '%s_' % namespace if namespace is not None else ''
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'system_metadata': flavors.save_flavor_info(
+ {}, flavors.get_default_flavor(), prefix)})
+ db_flavor = flavors.extract_flavor(db_inst, prefix)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ flavor = inst.get_flavor(namespace)
+ self.assertEqual(db_flavor['flavorid'], flavor.flavorid)
+
+ def test_get_flavor(self):
+ self._test_get_flavor(None)
+ self._test_get_flavor('foo')
+
+ def _test_set_flavor(self, namespace):
+ prefix = '%s_' % namespace if namespace is not None else ''
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ })
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ db_flavor = flavors.get_default_flavor()
+ inst.set_flavor(db_flavor, namespace)
+ db_inst = db.instance_get(self.context, db_inst['id'])
+ self.assertEqual(
+ db_flavor['flavorid'], flavors.extract_flavor(
+ db_inst, prefix)['flavorid'])
+
+ def test_set_flavor(self):
+ self._test_set_flavor(None)
+ self._test_set_flavor('foo')
+
+ def test_delete_flavor(self):
+ namespace = 'foo'
+ prefix = '%s_' % namespace
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'system_metadata': flavors.save_flavor_info(
+ {}, flavors.get_default_flavor(), prefix)})
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ inst.delete_flavor(namespace)
+ db_inst = db.instance_get(self.context, db_inst['id'])
+ self.assertEqual({}, utils.instance_sys_meta(db_inst))
+
+ def test_delete_flavor_no_namespace_fails(self):
+ inst = instance.Instance(system_metadata={})
+ self.assertRaises(KeyError, inst.delete_flavor, None)
+ self.assertRaises(KeyError, inst.delete_flavor, '')
+
+ @mock.patch.object(db, 'instance_metadata_delete')
+ def test_delete_metadata_key(self, db_delete):
+ inst = instance.Instance(context=self.context,
+ id=1, uuid='fake-uuid')
+ inst.metadata = {'foo': '1', 'bar': '2'}
+ inst.obj_reset_changes()
+ inst.delete_metadata_key('foo')
+ self.assertEqual({'bar': '2'}, inst.metadata)
+ self.assertEqual({}, inst.obj_get_changes())
+ db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
+
+ def test_reset_changes(self):
+ inst = instance.Instance()
+ inst.metadata = {'1985': 'present'}
+ inst.system_metadata = {'1955': 'past'}
+ self.assertEqual({}, inst._orig_metadata)
+ inst.obj_reset_changes(['metadata'])
+ self.assertEqual({'1985': 'present'}, inst._orig_metadata)
+ self.assertEqual({}, inst._orig_system_metadata)
+
+ def test_load_generic_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_generic') as mock_load:
+ def fake_load(name):
+ inst.system_metadata = {}
+
+ mock_load.side_effect = fake_load
+ inst.system_metadata
+ mock_load.assert_called_once_with('system_metadata')
+
+ def test_load_fault_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_fault') as mock_load:
+ def fake_load():
+ inst.fault = None
+
+ mock_load.side_effect = fake_load
+ inst.fault
+ mock_load.assert_called_once_with()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_load_generic(self, mock_get):
+ inst2 = instance.Instance(metadata={'foo': 'bar'})
+ mock_get.return_value = inst2
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ inst.metadata
+ self.assertEqual({'foo': 'bar'}, inst.metadata)
+ mock_get.assert_called_once_with(self.context,
+ uuid='fake-uuid',
+ expected_attrs=['metadata'])
+ self.assertNotIn('metadata', inst.obj_what_changed())
+
+ @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
+ def test_load_fault(self, mock_get):
+ fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
+ mock_get.return_value = {'fake': [fake_fault]}
+ inst = instance.Instance(context=self.context, uuid='fake')
+ fault = inst.fault
+ mock_get.assert_called_once_with(self.context, ['fake'])
+ self.assertEqual(fake_fault['id'], fault.id)
+ self.assertNotIn('metadata', inst.obj_what_changed())
+
+
+class TestInstanceObject(test_objects._LocalTest,
+ _TestInstanceObject):
+ pass
+
+
+class TestRemoteInstanceObject(test_objects._RemoteTest,
+ _TestInstanceObject):
+ pass
+
+
+class _TestInstanceListObject(object):
+ def fake_instance(self, id, updates=None):
+        fake_instance = fakes.stub_instance(id=id,
+ access_ipv4='1.2.3.4',
+ access_ipv6='::1')
+ fake_instance['scheduled_at'] = None
+ fake_instance['terminated_at'] = None
+ fake_instance['deleted_at'] = None
+ fake_instance['created_at'] = None
+ fake_instance['updated_at'] = None
+ fake_instance['launched_at'] = (
+ fake_instance['launched_at'].replace(
+ tzinfo=iso8601.iso8601.Utc(), microsecond=0))
+ fake_instance['info_cache'] = {'network_info': '[]',
+ 'instance_uuid': fake_instance['uuid']}
+ fake_instance['security_groups'] = []
+ fake_instance['deleted'] = 0
+ if updates:
+ fake_instance.update(updates)
+ return fake_instance
+
+ def test_get_all_by_filters(self):
+ fakes = [self.fake_instance(1), self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
+ 'asc', limit=None, marker=None,
+ columns_to_join=['metadata'],
+ use_slave=False).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_filters(
+ self.context, {'foo': 'bar'}, 'uuid', 'asc',
+ expected_attrs=['metadata'], use_slave=False)
+
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_all_by_filters_works_for_cleaned(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2, updates={'deleted': 2,
+ 'cleaned': None})]
+ self.context.read_deleted = 'yes'
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context,
+ {'deleted': True, 'cleaned': False},
+ 'uuid', 'asc', limit=None, marker=None,
+ columns_to_join=['metadata'],
+ use_slave=False).AndReturn(
+ [fakes[1]])
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_filters(
+ self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
+ expected_attrs=['metadata'], use_slave=False)
+
+ self.assertEqual(1, len(inst_list))
+ self.assertIsInstance(inst_list.objects[0], instance.Instance)
+ self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
+ self.assertRemotes()
+
+ def test_get_by_host(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ db.instance_get_all_by_host(self.context, 'foo',
+ columns_to_join=None,
+ use_slave=False).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertEqual(inst_list.objects[i]._context, self.context)
+ self.assertEqual(inst_list.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_get_by_host_and_node(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
+ db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar'
+ ).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host_and_node(self.context,
+ 'foo', 'bar')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_by_host_and_not_type(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
+ db.instance_get_all_by_host_and_not_type(self.context, 'foo',
+ type_id='bar').AndReturn(
+ fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host_and_not_type(
+ self.context, 'foo', 'bar')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_hung_in_rebooting(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ dt = timeutils.isotime()
+ self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
+ db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
+ fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_hung_in_rebooting(self.context,
+ dt)
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_active_by_window_joined(self):
+ fakes = [self.fake_instance(1), self.fake_instance(2)]
+ # NOTE(mriedem): Send in a timezone-naive datetime since the
+ # InstanceList.get_active_by_window_joined method should convert it
+ # to tz-aware for the DB API call, which we'll assert with our stub.
+ dt = timeutils.utcnow()
+
+ def fake_instance_get_active_by_window_joined(context, begin, end,
+ project_id, host):
+ # make sure begin is tz-aware
+ self.assertIsNotNone(begin.utcoffset())
+ self.assertIsNone(end)
+ return fakes
+
+ with mock.patch.object(db, 'instance_get_active_by_window_joined',
+ fake_instance_get_active_by_window_joined):
+ inst_list = instance.InstanceList.get_active_by_window_joined(
+ self.context, dt)
+
+ for fake, obj in zip(fakes, inst_list.objects):
+ self.assertIsInstance(obj, instance.Instance)
+ self.assertEqual(obj.uuid, fake['uuid'])
+ self.assertRemotes()
+
+ def test_with_fault(self):
+ fake_insts = [
+ fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
+ fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
+ ]
+ fake_faults = test_instance_fault.fake_faults
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_get_all_by_host(self.context, 'host',
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(fake_insts)
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [x['uuid'] for x in fake_insts]
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ instances = instance.InstanceList.get_by_host(self.context, 'host',
+ expected_attrs=['fault'],
+ use_slave=False)
+ self.assertEqual(2, len(instances))
+ self.assertEqual(fake_faults['fake-uuid'][0],
+ dict(instances[0].fault.iteritems()))
+ self.assertIsNone(instances[1].fault)
+
+ def test_fill_faults(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+
+ inst1 = instance.Instance(uuid='uuid1')
+ inst2 = instance.Instance(uuid='uuid2')
+ insts = [inst1, inst2]
+ for inst in insts:
+ inst.obj_reset_changes()
+ db_faults = {
+ 'uuid1': [{'id': 123,
+ 'instance_uuid': 'uuid1',
+ 'code': 456,
+ 'message': 'Fake message',
+ 'details': 'No details',
+ 'host': 'foo',
+ 'deleted': False,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'created_at': None,
+ }
+ ]}
+
+ db.instance_fault_get_by_instance_uuids(self.context,
+ [x.uuid for x in insts],
+ ).AndReturn(db_faults)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList()
+ inst_list._context = self.context
+ inst_list.objects = insts
+ faulty = inst_list.fill_faults()
+ self.assertEqual(faulty, ['uuid1'])
+ self.assertEqual(inst_list[0].fault.message,
+ db_faults['uuid1'][0]['message'])
+ self.assertIsNone(inst_list[1].fault)
+ for inst in inst_list:
+ self.assertEqual(inst.obj_what_changed(), set())
+
+ def test_get_by_security_group(self):
+ fake_secgroup = dict(test_security_group.fake_secgroup)
+ fake_secgroup['instances'] = [
+ fake_instance.fake_db_instance(id=1,
+ system_metadata={'foo': 'bar'}),
+ fake_instance.fake_db_instance(id=2),
+ ]
+
+ with mock.patch.object(db, 'security_group_get') as sgg:
+ sgg.return_value = fake_secgroup
+ secgroup = security_group.SecurityGroup()
+ secgroup.id = fake_secgroup['id']
+ instances = instance.InstanceList.get_by_security_group(
+ self.context, secgroup)
+
+ self.assertEqual(2, len(instances))
+ self.assertEqual([1, 2], [x.id for x in instances])
+ self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
+ self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
+
+
+class TestInstanceListObject(test_objects._LocalTest,
+ _TestInstanceListObject):
+ pass
+
+
+class TestRemoteInstanceListObject(test_objects._RemoteTest,
+ _TestInstanceListObject):
+ pass
+
+
+class TestInstanceObjectMisc(test.NoDBTestCase):
+ def test_expected_cols(self):
+ self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
+ self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
+ self.assertIsNone(instance._expected_cols(None))
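The instance-object tests above stub the nova.db API with mox (StubOutWithMock, AndReturn, ReplayAll) so the object layer is exercised without a real database. The following self-contained sketch shows the same stub-and-assert idea using only the standard library's unittest.mock; the FakeDB and Instance classes here are hypothetical stand-ins for illustration, not the real nova.db or nova.objects APIs.

    from unittest import mock


    class FakeDB(object):
        """Hypothetical stand-in for a data-access module; not nova.db."""

        def instance_get_by_uuid(self, context, uuid, columns_to_join=None):
            raise NotImplementedError('real DB access is never hit in a unit test')


    db = FakeDB()


    class Instance(object):
        """Hypothetical stand-in for the object layer; not nova.objects.Instance."""

        def __init__(self, uuid, host):
            self.uuid = uuid
            self.host = host

        @classmethod
        def get_by_uuid(cls, context, uuid):
            # The object layer delegates to the DB API and hydrates itself
            # from the returned row.
            row = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=['info_cache'])
            return cls(uuid=row['uuid'], host=row['host'])


    def test_get_by_uuid_uses_db_layer():
        fake_row = {'uuid': 'fake-uuid', 'host': 'foo-host'}
        with mock.patch.object(db, 'instance_get_by_uuid',
                               return_value=fake_row) as mock_get:
            inst = Instance.get_by_uuid('ctxt', 'fake-uuid')
        # The object was hydrated from the stubbed row...
        assert inst.host == 'foo-host'
        # ...and the DB API was called exactly as declared, keyword and all.
        mock_get.assert_called_once_with('ctxt', 'fake-uuid',
                                         columns_to_join=['info_cache'])


    if __name__ == '__main__':
        test_get_by_uuid_uses_db_layer()
        print('ok')

Running the file directly executes the single check and prints 'ok' only if the stubbed call was made with the expected arguments, which is the same contract the mox ReplayAll/verify cycle enforces above.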
diff --git a/nova/tests/unit/objects/test_instance_action.py b/nova/tests/unit/objects/test_instance_action.py
new file mode 100644
index 0000000000..488ba6fa2a
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_action.py
@@ -0,0 +1,365 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import traceback
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova.objects import instance_action
+from nova import test
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_action = {
+ 'created_at': NOW,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'action': 'fake-action',
+ 'instance_uuid': 'fake-uuid',
+ 'request_id': 'fake-request',
+ 'user_id': 'fake-user',
+ 'project_id': 'fake-project',
+ 'start_time': NOW,
+ 'finish_time': None,
+ 'message': 'foo',
+}
+fake_event = {
+ 'created_at': NOW,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'event': 'fake-event',
+ 'action_id': 123,
+ 'start_time': NOW,
+ 'finish_time': None,
+ 'result': 'fake-result',
+ 'traceback': 'fake-tb',
+}
+
+
+class _TestInstanceActionObject(object):
+ @mock.patch.object(db, 'action_get_by_request_id')
+ def test_get_by_request_id(self, mock_get):
+ context = self.context
+ mock_get.return_value = fake_action
+ action = instance_action.InstanceAction.get_by_request_id(
+ context, 'fake-uuid', 'fake-request')
+ self.compare_obj(action, fake_action)
+ mock_get.assert_called_once_with(context,
+ 'fake-uuid', 'fake-request')
+
+ def test_pack_action_start(self):
+ values = instance_action.InstanceAction.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ self.assertEqual(values['request_id'], self.context.request_id)
+ self.assertEqual(values['user_id'], self.context.user_id)
+ self.assertEqual(values['project_id'], self.context.project_id)
+ self.assertEqual(values['instance_uuid'], 'fake-uuid')
+ self.assertEqual(values['action'], 'fake-action')
+ self.assertEqual(values['start_time'].replace(tzinfo=None),
+ self.context.timestamp)
+
+ def test_pack_action_finish(self):
+ timeutils.set_time_override(override_time=NOW)
+ values = instance_action.InstanceAction.pack_action_finish(
+ self.context, 'fake-uuid')
+ self.assertEqual(values['request_id'], self.context.request_id)
+ self.assertEqual(values['instance_uuid'], 'fake-uuid')
+ self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW)
+
+ @mock.patch.object(db, 'action_start')
+ def test_action_start(self, mock_start):
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ mock_start.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action', want_result=True)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'action_start')
+ def test_action_start_no_result(self, mock_start):
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ mock_start.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action', want_result=False)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(action)
+
+ @mock.patch.object(db, 'action_finish')
+ def test_action_finish(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_finish(
+ self.context, 'fake-uuid')
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_finish(
+ self.context, 'fake-uuid', want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'action_finish')
+ def test_action_finish_no_result(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_finish(
+ self.context, 'fake-uuid')
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_finish(
+ self.context, 'fake-uuid', want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(action)
+
+ @mock.patch.object(db, 'action_finish')
+ @mock.patch.object(db, 'action_start')
+ def test_finish(self, mock_start, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ expected_packed_action_start = {
+ 'request_id': self.context.request_id,
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'instance_uuid': 'fake-uuid',
+ 'action': 'fake-action',
+ 'start_time': self.context.timestamp,
+ }
+ expected_packed_action_finish = {
+ 'request_id': self.context.request_id,
+ 'instance_uuid': 'fake-uuid',
+ 'finish_time': NOW,
+ }
+ mock_start.return_value = fake_action
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ action.finish(self.context)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_action_start)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_action_finish)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'actions_get')
+ def test_get_list(self, mock_get):
+ fake_actions = [dict(fake_action, id=1234),
+ dict(fake_action, id=5678)]
+ mock_get.return_value = fake_actions
+ obj_list = instance_action.InstanceActionList.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ for index, action in enumerate(obj_list):
+ self.compare_obj(action, fake_actions[index])
+ mock_get.assert_called_once_with(self.context, 'fake-uuid')
+
+
+class TestInstanceActionObject(test_objects._LocalTest,
+ _TestInstanceActionObject):
+ pass
+
+
+class TestRemoteInstanceActionObject(test_objects._RemoteTest,
+ _TestInstanceActionObject):
+ pass
+
+
+class _TestInstanceActionEventObject(object):
+ @mock.patch.object(db, 'action_event_get_by_id')
+ def test_get_by_id(self, mock_get):
+ mock_get.return_value = fake_event
+ event = instance_action.InstanceActionEvent.get_by_id(
+ self.context, 'fake-action-id', 'fake-event-id')
+ self.compare_obj(event, fake_event)
+ mock_get.assert_called_once_with(self.context,
+ 'fake-action-id', 'fake-event-id')
+
+ @mock.patch.object(db, 'action_event_start')
+ def test_event_start(self, mock_start):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_start(
+ self.context, 'fake-uuid', 'fake-event')
+ mock_start.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_start(
+ self.context, 'fake-uuid', 'fake-event', want_result=True)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(db, 'action_event_start')
+ def test_event_start_no_result(self, mock_start):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_start(
+ self.context, 'fake-uuid', 'fake-event')
+ mock_start.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_start(
+ self.context, 'fake-uuid', 'fake-event', want_result=False)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+ mock_finish.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_finish(
+ self.context, 'fake-uuid', 'fake-event', want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_no_result(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+ mock_finish.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_finish(
+ self.context, 'fake-uuid', 'fake-event', want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure(self, mock_finish, mock_tb):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
+ want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_legacy(self, mock_finish, mock_tb):
+ # Tests that exc_tb is serialized when it's not a string type.
+ mock_tb.return_value = 'fake-tb'
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ fake_tb = mock.sentinel.fake_tb
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', exc_val='val',
+ exc_tb=fake_tb, want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+ mock_tb.assert_called_once_with(fake_tb)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_legacy_unicode(self, mock_finish):
+ # Tests that traceback.format_tb is not called when exc_tb is unicode.
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', unicode('fake-tb'))
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', exc_val='val',
+ exc_tb=unicode('fake-tb'), want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_no_result(self, mock_finish, mock_tb):
+ # Tests that traceback.format_tb is not called when exc_tb is a str
+ # and want_result is False, so no event should come back.
+ mock_tb.return_value = 'fake-tb'
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
+ want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+ self.assertFalse(mock_tb.called)
+
+ @mock.patch.object(db, 'action_events_get')
+ def test_get_by_action(self, mock_get):
+ fake_events = [dict(fake_event, id=1234),
+ dict(fake_event, id=5678)]
+ mock_get.return_value = fake_events
+ obj_list = instance_action.InstanceActionEventList.get_by_action(
+ self.context, 'fake-action-id')
+ for index, event in enumerate(obj_list):
+ self.compare_obj(event, fake_events[index])
+ mock_get.assert_called_once_with(self.context, 'fake-action-id')
+
+ @mock.patch('nova.objects.instance_action.InstanceActionEvent.'
+ 'pack_action_event_finish')
+ @mock.patch('traceback.format_tb')
+ def test_event_finish_with_failure_serialized(self, mock_format,
+ mock_pack):
+ mock_format.return_value = 'traceback'
+ mock_pack.side_effect = test.TestingException
+ self.assertRaises(
+ test.TestingException,
+ instance_action.InstanceActionEvent.event_finish_with_failure,
+ self.context, 'fake-uuid', 'fake-event',
+ exc_val=mock.sentinel.exc_val,
+ exc_tb=mock.sentinel.exc_tb)
+ mock_pack.assert_called_once_with(self.context, 'fake-uuid',
+ 'fake-event',
+ exc_val=str(mock.sentinel.exc_val),
+ exc_tb='traceback')
+ mock_format.assert_called_once_with(mock.sentinel.exc_tb)
+
+
+class TestInstanceActionEventObject(test_objects._LocalTest,
+ _TestInstanceActionEventObject):
+ pass
+
+
+class TestRemoteInstanceActionEventObject(test_objects._RemoteTest,
+ _TestInstanceActionEventObject):
+ pass
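Several of the action tests above stack two @mock.patch decorators (for example db.action_start and db.action_finish in test_finish) and rely on the decorator closest to the function supplying the first injected mock argument. A minimal, self-contained illustration of that ordering, using a hypothetical Store class rather than nova.db:

    from unittest import mock


    class Store(object):
        """Hypothetical stand-in; not nova.db."""

        @staticmethod
        def action_start(values):
            raise RuntimeError('never called for real in the test')

        @staticmethod
        def action_finish(values):
            raise RuntimeError('never called for real in the test')


    def run_action(start_values, finish_values):
        Store.action_start(start_values)
        Store.action_finish(finish_values)


    @mock.patch.object(Store, 'action_finish')   # outermost -> last mock argument
    @mock.patch.object(Store, 'action_start')    # innermost -> first mock argument
    def test_start_then_finish(mock_start, mock_finish):
        run_action({'action': 'fake-action'}, {'result': 'ok'})
        mock_start.assert_called_once_with({'action': 'fake-action'})
        mock_finish.assert_called_once_with({'result': 'ok'})


    if __name__ == '__main__':
        test_start_then_finish()
        print('ok')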
diff --git a/nova/tests/unit/objects/test_instance_fault.py b/nova/tests/unit/objects/test_instance_fault.py
new file mode 100644
index 0000000000..97716d42d3
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_fault.py
@@ -0,0 +1,126 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova.objects import instance_fault
+from nova.tests.unit.objects import test_objects
+
+
+fake_faults = {
+ 'fake-uuid': [
+ {'id': 1, 'instance_uuid': 'fake-uuid', 'code': 123, 'message': 'msg1',
+ 'details': 'details', 'host': 'host', 'deleted': False,
+ 'created_at': None, 'updated_at': None, 'deleted_at': None},
+ {'id': 2, 'instance_uuid': 'fake-uuid', 'code': 456, 'message': 'msg2',
+ 'details': 'details', 'host': 'host', 'deleted': False,
+ 'created_at': None, 'updated_at': None, 'deleted_at': None},
+ ]
+ }
+
+
+class _TestInstanceFault(object):
+ def test_get_latest_for_instance(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ fault = instance_fault.InstanceFault.get_latest_for_instance(
+ self.context, 'fake-uuid')
+ for key in fake_faults['fake-uuid'][0]:
+ self.assertEqual(fake_faults['fake-uuid'][0][key], fault[key])
+
+ def test_get_latest_for_instance_with_none(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn({})
+ self.mox.ReplayAll()
+ fault = instance_fault.InstanceFault.get_latest_for_instance(
+ self.context, 'fake-uuid')
+ self.assertIsNone(fault)
+
+ def test_get_by_instance(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
+ self.context, ['fake-uuid'])
+ for index, db_fault in enumerate(fake_faults['fake-uuid']):
+ for key in db_fault:
+ self.assertEqual(fake_faults['fake-uuid'][index][key],
+ faults[index][key])
+
+ def test_get_by_instance_with_none(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn({})
+ self.mox.ReplayAll()
+ faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
+ self.context, ['fake-uuid'])
+ self.assertEqual(0, len(faults))
+
+ @mock.patch('nova.cells.rpcapi.CellsAPI.instance_fault_create_at_top')
+ @mock.patch('nova.db.instance_fault_create')
+ def _test_create(self, update_cells, mock_create, cells_fault_create):
+ mock_create.return_value = fake_faults['fake-uuid'][1]
+ fault = instance_fault.InstanceFault()
+ fault.instance_uuid = 'fake-uuid'
+ fault.code = 456
+ fault.message = 'foo'
+ fault.details = 'you screwed up'
+ fault.host = 'myhost'
+ fault.create(self.context)
+ self.assertEqual(2, fault.id)
+ mock_create.assert_called_once_with(self.context,
+ {'instance_uuid': 'fake-uuid',
+ 'code': 456,
+ 'message': 'foo',
+ 'details': 'you screwed up',
+ 'host': 'myhost'})
+ if update_cells:
+ cells_fault_create.assert_called_once_with(
+ self.context, fake_faults['fake-uuid'][1])
+ else:
+ self.assertFalse(cells_fault_create.called)
+
+ def test_create_no_cells(self):
+ self.flags(enable=False, group='cells')
+ self._test_create(False)
+
+ def test_create_api_cell(self):
+ self.flags(cell_type='api', enable=True, group='cells')
+ self._test_create(False)
+
+ def test_create_compute_cell(self):
+ self.flags(cell_type='compute', enable=True, group='cells')
+ self._test_create(True)
+
+ def test_create_already_created(self):
+ fault = instance_fault.InstanceFault()
+ fault.id = 1
+ self.assertRaises(exception.ObjectActionError,
+ fault.create, self.context)
+
+
+class TestInstanceFault(test_objects._LocalTest,
+ _TestInstanceFault):
+ pass
+
+
+class TestInstanceFaultRemote(test_objects._RemoteTest,
+ _TestInstanceFault):
+ pass
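_test_create above is a decorated helper: the caller passes update_cells positionally, and the @mock.patch decorators append their mocks after it, innermost first, so the helper can branch its assertions on the cells configuration. A small stand-alone sketch of the same pattern, with hypothetical DB and Cells classes in place of nova.db and the cells RPC API:

    from unittest import mock


    class DB(object):
        @staticmethod
        def fault_create(values):
            raise RuntimeError('never called for real in the test')


    class Cells(object):
        @staticmethod
        def fault_create_at_top(fault):
            raise RuntimeError('never called for real in the test')


    def record_fault(values, notify_cells):
        # Stand-in for the object-layer create(); the real code also sets id etc.
        fault = DB.fault_create(values)
        if notify_cells:
            Cells.fault_create_at_top(fault)
        return fault


    @mock.patch.object(Cells, 'fault_create_at_top')
    @mock.patch.object(DB, 'fault_create')
    def _check_create(update_cells, mock_create, cells_create):
        mock_create.return_value = {'id': 2}
        record_fault({'code': 456}, notify_cells=update_cells)
        mock_create.assert_called_once_with({'code': 456})
        if update_cells:
            cells_create.assert_called_once_with({'id': 2})
        else:
            assert not cells_create.called


    if __name__ == '__main__':
        _check_create(True)    # compute-cell style: fault propagated upward
        _check_create(False)   # cells disabled: only the DB write happens
        print('ok')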
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
new file mode 100644
index 0000000000..0e20f54145
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -0,0 +1,350 @@
+# Copyright (c) 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova.objects import instance_group
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit import utils as tests_utils
+
+
+class _TestInstanceGroupObjects(object):
+
+ def setUp(self):
+ super(_TestInstanceGroupObjects, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def _get_default_values(self):
+ return {'name': 'fake_name',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+
+ def _create_instance_group(self, context, values, policies=None,
+ members=None):
+ return db.instance_group_create(context, values, policies=policies,
+ members=members)
+
+ def test_get_by_uuid(self):
+ values = self._get_default_values()
+ policies = ['policy1', 'policy2']
+ members = ['instance_id1', 'instance_id2']
+ db_result = self._create_instance_group(self.context, values,
+ policies=policies,
+ members=members)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.members, members)
+ self.assertEqual(obj_result.policies, policies)
+
+ def test_get_by_instance_uuid(self):
+ values = self._get_default_values()
+ policies = ['policy1', 'policy2']
+ members = ['instance_id1', 'instance_id2']
+ db_result = self._create_instance_group(self.context, values,
+ policies=policies,
+ members=members)
+ obj_result = instance_group.InstanceGroup.get_by_instance_uuid(
+ self.context, 'instance_id1')
+ self.assertEqual(obj_result.uuid, db_result.uuid)
+
+ def test_refresh(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.name, 'fake_name')
+ values = {'name': 'new_name', 'user_id': 'new_user',
+ 'project_id': 'new_project'}
+ db.instance_group_update(self.context, db_result['uuid'],
+ values)
+ obj_result.refresh()
+ self.assertEqual(obj_result.name, 'new_name')
+ self.assertEqual(set([]), obj_result.obj_what_changed())
+
+ def test_save_simple(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.name, 'fake_name')
+ obj_result.name = 'new_name'
+ obj_result.save()
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['name'], 'new_name')
+
+ def test_save_policies(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ policies = ['policy1', 'policy2']
+ obj_result.policies = policies
+ obj_result.save()
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['policies'], policies)
+
+ def test_save_members(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ members = ['instance1', 'instance2']
+ obj_result.members = members
+ fake_notifier.NOTIFICATIONS = []
+ obj_result.save()
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.update', msg.event_type)
+ self.assertEqual(members, msg.payload['members'])
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['members'], members)
+
+ def test_create(self):
+ group1 = instance_group.InstanceGroup()
+ group1.uuid = 'fake-uuid'
+ group1.name = 'fake-name'
+ fake_notifier.NOTIFICATIONS = []
+ group1.create(self.context)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(group1.name, msg.payload['name'])
+ self.assertEqual(group1.uuid, msg.payload['server_group_id'])
+ self.assertEqual('servergroup.create', msg.event_type)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.uuid, group2.uuid)
+ self.assertEqual(group1.name, group2.name)
+ result = db.instance_group_get(self.context, group1.uuid)
+ self.assertEqual(group1.id, result.id)
+ self.assertEqual(group1.uuid, result.uuid)
+ self.assertEqual(group1.name, result.name)
+
+ def test_create_with_policies(self):
+ group1 = instance_group.InstanceGroup()
+ group1.policies = ['policy1', 'policy2']
+ group1.create(self.context)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.policies, group2.policies)
+
+ def test_create_with_members(self):
+ group1 = instance_group.InstanceGroup()
+ group1.members = ['instance1', 'instance2']
+ group1.create(self.context)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.members, group2.members)
+
+ def test_recreate_fails(self):
+ group = instance_group.InstanceGroup()
+ group.create(self.context)
+ self.assertRaises(exception.ObjectActionError, group.create,
+ self.context)
+
+ def test_destroy(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ group = instance_group.InstanceGroup()
+ group.id = result.id
+ group.uuid = result.uuid
+ fake_notifier.NOTIFICATIONS = []
+ group.destroy(self.context)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.delete', msg.event_type)
+ self.assertEqual(group.uuid, msg.payload['server_group_id'])
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_get, self.context, result['uuid'])
+
+ def _populate_instances(self):
+ instances = [(str(uuid.uuid4()), 'f1', 'p1'),
+ (str(uuid.uuid4()), 'f2', 'p1'),
+ (str(uuid.uuid4()), 'f3', 'p2'),
+ (str(uuid.uuid4()), 'f4', 'p2')]
+ for instance in instances:
+ values = self._get_default_values()
+ values['uuid'] = instance[0]
+ values['name'] = instance[1]
+ values['project_id'] = instance[2]
+ self._create_instance_group(self.context, values)
+ return instances
+
+ def test_list_all(self):
+ self._populate_instances()
+ inst_list = instance_group.InstanceGroupList.get_all(self.context)
+ groups = db.instance_group_get_all(self.context)
+ self.assertEqual(len(groups), len(inst_list.objects))
+ self.assertEqual(len(groups), 4)
+ for i in range(0, len(groups)):
+ self.assertIsInstance(inst_list.objects[i],
+ instance_group.InstanceGroup)
+ self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
+
+ def test_list_by_project_id(self):
+ self._populate_instances()
+ project_ids = ['p1', 'p2']
+ for id in project_ids:
+ il = instance_group.InstanceGroupList.get_by_project_id(
+ self.context, id)
+ groups = db.instance_group_get_all_by_project_id(self.context, id)
+ self.assertEqual(len(groups), len(il.objects))
+ self.assertEqual(len(groups), 2)
+ for i in range(0, len(groups)):
+ self.assertIsInstance(il.objects[i],
+ instance_group.InstanceGroup)
+ self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
+ self.assertEqual(il.objects[i].name, groups[i]['name'])
+ self.assertEqual(il.objects[i].project_id, id)
+
+ def test_get_by_name(self):
+ self._populate_instances()
+ ctxt = context.RequestContext('fake_user', 'p1')
+ ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
+ self.assertEqual('f1', ig.name)
+
+ def test_get_by_hint(self):
+ instances = self._populate_instances()
+ for instance in instances:
+ ctxt = context.RequestContext('fake_user', instance[2])
+ ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
+ self.assertEqual(instance[1], ig.name)
+ ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
+ self.assertEqual(instance[0], ig.uuid)
+
+ def test_add_members(self):
+ instance_ids = ['fakeid1', 'fakeid2']
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ fake_notifier.NOTIFICATIONS = []
+ members = instance_group.InstanceGroup.add_members(self.context,
+ group.uuid, instance_ids)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.addmember', msg.event_type)
+ self.assertEqual(group.uuid, msg.payload['server_group_id'])
+ self.assertEqual(instance_ids, msg.payload['instance_uuids'])
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ for instance in instance_ids:
+ self.assertIn(instance, members)
+ self.assertIn(instance, group.members)
+
+ def test_get_hosts(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.host = 'hostA'
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.host = 'hostB'
+ instance2.save()
+ instance3 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance3.host = 'hostB'
+ instance3.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ hosts = group.get_hosts(self.context)
+ self.assertEqual(2, len(hosts))
+ self.assertIn('hostA', hosts)
+ self.assertIn('hostB', hosts)
+ hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
+ self.assertEqual(1, len(hosts))
+ self.assertIn('hostB', hosts)
+
+ def test_get_hosts_with_some_none(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.host = None
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.host = 'hostB'
+ instance2.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ hosts = group.get_hosts(self.context)
+ self.assertEqual(1, len(hosts))
+ self.assertIn('hostB', hosts)
+
+ def test_obj_make_compatible(self):
+ group = instance_group.InstanceGroup(uuid='fake-uuid',
+ name='fake-name')
+ group.create(self.context)
+ group_primitive = group.obj_to_primitive()
+ group.obj_make_compatible(group_primitive, '1.6')
+ self.assertEqual({}, group_primitive['metadetails'])
+
+ def test_count_members_by_user(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.user_id = 'user1'
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.user_id = 'user2'
+ instance2.save()
+ instance3 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance3.user_id = 'user2'
+ instance3.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ count_user1 = group.count_members_by_user(self.context, 'user1')
+ count_user2 = group.count_members_by_user(self.context, 'user2')
+ count_user3 = group.count_members_by_user(self.context, 'user3')
+ self.assertEqual(1, count_user1)
+ self.assertEqual(2, count_user2)
+ self.assertEqual(0, count_user3)
+
+
+class TestInstanceGroupObject(test_objects._LocalTest,
+ _TestInstanceGroupObjects):
+ pass
+
+
+class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
+ _TestInstanceGroupObjects):
+ pass
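The server-group tests above assert on fake_notifier.NOTIFICATIONS, a module-level list that a test double fills in place of the real notifier. A minimal, self-contained sketch of that capture pattern; the CapturingNotifier and add_members below are illustrative stand-ins, not Nova's fake_notifier or InstanceGroup API:

    NOTIFICATIONS = []


    class CapturingNotifier(object):
        """Records emitted notifications instead of sending them anywhere."""

        def info(self, context, event_type, payload):
            NOTIFICATIONS.append({'event_type': event_type, 'payload': payload})


    def add_members(notifier, group_uuid, instance_uuids):
        # Stand-in for the object-layer method; the real code also updates the DB.
        notifier.info(None, 'servergroup.addmember',
                      {'server_group_id': group_uuid,
                       'instance_uuids': instance_uuids})
        return instance_uuids


    def test_add_members_notifies():
        del NOTIFICATIONS[:]
        add_members(CapturingNotifier(), 'fake-group', ['fakeid1', 'fakeid2'])
        assert len(NOTIFICATIONS) == 1
        msg = NOTIFICATIONS[0]
        assert msg['event_type'] == 'servergroup.addmember'
        assert msg['payload']['server_group_id'] == 'fake-group'


    if __name__ == '__main__':
        test_add_members_notifies()
        print('ok')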
diff --git a/nova/tests/unit/objects/test_instance_info_cache.py b/nova/tests/unit/objects/test_instance_info_cache.py
new file mode 100644
index 0000000000..9a72772030
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_info_cache.py
@@ -0,0 +1,117 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.cells import opts as cells_opts
+from nova.cells import rpcapi as cells_rpcapi
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova.objects import instance_info_cache
+from nova.tests.unit.objects import test_objects
+
+
+fake_info_cache = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'instance_uuid': 'fake-uuid',
+ 'network_info': '[]',
+ }
+
+
+class _TestInstanceInfoCacheObject(object):
+ def test_get_by_instance_uuid(self):
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(
+ dict(fake_info_cache, network_info=nwinfo.json()))
+ self.mox.ReplayAll()
+ obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ self.assertEqual(obj.instance_uuid, 'fake-uuid')
+ self.assertEqual(obj.network_info, nwinfo)
+ self.assertRemotes()
+
+ def test_get_by_instance_uuid_no_entries(self):
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(
+ exception.InstanceInfoCacheNotFound,
+ instance_info_cache.InstanceInfoCache.get_by_instance_uuid,
+ self.context, 'fake-uuid')
+
+ def test_new(self):
+ obj = instance_info_cache.InstanceInfoCache.new(self.context,
+ 'fake-uuid')
+ self.assertEqual(set(['instance_uuid', 'network_info']),
+ obj.obj_what_changed())
+ self.assertEqual('fake-uuid', obj.instance_uuid)
+ self.assertIsNone(obj.network_info)
+
+ def _save_helper(self, cell_type, update_cells):
+ obj = instance_info_cache.InstanceInfoCache()
+ cells_api = cells_rpcapi.CellsAPI()
+
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ self.mox.StubOutWithMock(cells_opts, 'get_cell_type')
+ self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(cells_api,
+ 'instance_info_cache_update_at_top')
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ db.instance_info_cache_update(
+ self.context, 'fake-uuid',
+ {'network_info': nwinfo.json()}).AndReturn('foo')
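+        # The cache update is only pushed to the top-level cell when
+        # update_cells is requested and this host is a compute cell.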
+ if update_cells:
+ cells_opts.get_cell_type().AndReturn(cell_type)
+ if cell_type == 'compute':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api)
+ cells_api.instance_info_cache_update_at_top(
+ self.context, 'foo')
+ self.mox.ReplayAll()
+ obj._context = self.context
+ obj.instance_uuid = 'fake-uuid'
+ obj.network_info = nwinfo
+ obj.save(update_cells=update_cells)
+
+ def test_save_with_update_cells_and_compute_cell(self):
+ self._save_helper('compute', True)
+
+ def test_save_with_update_cells_and_non_compute_cell(self):
+ self._save_helper(None, True)
+
+ def test_save_without_update_cells(self):
+ self._save_helper(None, False)
+
+ def test_refresh(self):
+ obj = instance_info_cache.InstanceInfoCache.new(self.context,
+ 'fake-uuid1')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn(
+ fake_info_cache)
+ self.mox.ReplayAll()
+ obj.refresh()
+ self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid)
+
+
+class TestInstanceInfoCacheObject(test_objects._LocalTest,
+ _TestInstanceInfoCacheObject):
+ pass
+
+
+class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
+ _TestInstanceInfoCacheObject):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_numa_topology.py b/nova/tests/unit/objects/test_instance_numa_topology.py
new file mode 100644
index 0000000000..82c34ccda2
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_numa_topology.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+
+from nova import exception
+from nova import objects
+from nova.tests.unit.objects import test_objects
+from nova.virt import hardware
+
+fake_numa_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([1, 2]), 512, hardware.VirtPageSize(2048)),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([3, 4]), 512, hardware.VirtPageSize(2048))])
+
+fake_db_topology = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'instance_uuid': str(uuid.uuid4()),
+ 'numa_topology': fake_numa_topology.to_json()
+ }
+
+
+class _TestInstanceNUMATopology(object):
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ def test_create(self, mock_update):
+ topo_obj = objects.InstanceNUMATopology.obj_from_topology(
+ fake_numa_topology)
+ topo_obj.instance_uuid = fake_db_topology['instance_uuid']
+ topo_obj.create(self.context)
+ self.assertEqual(1, len(mock_update.call_args_list))
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid(self, mock_get):
+ mock_get.return_value = fake_db_topology
+ numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid(
+ self.context, 'fake_uuid')
+ self.assertEqual(fake_db_topology['instance_uuid'],
+ numa_topology.instance_uuid)
+ for obj_cell, topo_cell in zip(
+ numa_topology.cells, fake_numa_topology.cells):
+ self.assertIsInstance(obj_cell, objects.InstanceNUMACell)
+ self.assertEqual(topo_cell.cpuset, obj_cell.cpuset)
+ self.assertEqual(topo_cell.memory, obj_cell.memory)
+ self.assertEqual(topo_cell.pagesize.size_kb, obj_cell.pagesize)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid_missing(self, mock_get):
+ mock_get.return_value = None
+ self.assertRaises(
+ exception.NumaTopologyNotFound,
+ objects.InstanceNUMATopology.get_by_instance_uuid,
+ self.context, 'fake_uuid')
+
+
+class TestInstanceNUMATopology(test_objects._LocalTest,
+ _TestInstanceNUMATopology):
+ pass
+
+
+class TestInstanceNUMATopologyRemote(test_objects._RemoteTest,
+ _TestInstanceNUMATopology):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_pci_requests.py b/nova/tests/unit/objects/test_instance_pci_requests.py
new file mode 100644
index 0000000000..541d503ff4
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_pci_requests.py
@@ -0,0 +1,191 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+FAKE_UUID = '79a53d6b-0893-4838-a971-15f4f382e7c2'
+FAKE_REQUEST_UUID = '69b53d6b-0793-4839-c981-f5c4f382e7d2'
+
+# NOTE(danms): Yes, these are the same right now, but going forward
+# we have changes to make that will be reflected in the instance_extra
+# format, but not in system_metadata.
+fake_pci_requests = [
+ {'count': 2,
+ 'spec': [{'vendor_id': '8086',
+ 'device_id': '1502'}],
+ 'alias_name': 'alias_1',
+ 'is_new': False,
+ 'request_id': FAKE_REQUEST_UUID},
+ {'count': 2,
+ 'spec': [{'vendor_id': '6502',
+ 'device_id': '07B5'}],
+ 'alias_name': 'alias_2',
+ 'is_new': True,
+ 'request_id': FAKE_REQUEST_UUID},
+ ]
+
+fake_legacy_pci_requests = [
+ {'count': 2,
+ 'spec': [{'vendor_id': '8086',
+ 'device_id': '1502'}],
+ 'alias_name': 'alias_1'},
+ {'count': 1,
+ 'spec': [{'vendor_id': '6502',
+ 'device_id': '07B5'}],
+ 'alias_name': 'alias_2'},
+ ]
+
+
+class _TestInstancePCIRequests(object):
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid(self, mock_get):
+ mock_get.return_value = {
+ 'instance_uuid': FAKE_UUID,
+ 'pci_requests': jsonutils.dumps(fake_pci_requests),
+ }
+ requests = objects.InstancePCIRequests.get_by_instance_uuid(
+ self.context, FAKE_UUID)
+ self.assertEqual(2, len(requests.requests))
+ for index, request in enumerate(requests.requests):
+ self.assertEqual(fake_pci_requests[index]['alias_name'],
+ request.alias_name)
+ self.assertEqual(fake_pci_requests[index]['count'],
+ request.count)
+ self.assertEqual(fake_pci_requests[index]['spec'],
+ [dict(x.items()) for x in request.spec])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_by_instance_uuid_and_newness(self, mock_get):
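+        # The newness flag should filter the returned requests by their
+        # is_new value.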
+ pcir = objects.InstancePCIRequests
+ mock_get.return_value = objects.InstancePCIRequests(
+ instance_uuid='fake-uuid',
+ requests=[objects.InstancePCIRequest(count=1, is_new=False),
+ objects.InstancePCIRequest(count=2, is_new=True)])
+ old_req = pcir.get_by_instance_uuid_and_newness(self.context,
+ 'fake-uuid',
+ False)
+ mock_get.return_value = objects.InstancePCIRequests(
+ instance_uuid='fake-uuid',
+ requests=[objects.InstancePCIRequest(count=1, is_new=False),
+ objects.InstancePCIRequest(count=2, is_new=True)])
+ new_req = pcir.get_by_instance_uuid_and_newness(self.context,
+ 'fake-uuid',
+ True)
+ self.assertEqual(1, old_req.requests[0].count)
+ self.assertEqual(2, new_req.requests[0].count)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_by_instance_current(self, mock_get):
+ instance = objects.Instance(uuid='fake-uuid',
+ system_metadata={})
+ objects.InstancePCIRequests.get_by_instance(self.context,
+ instance)
+ mock_get.assert_called_once_with(self.context, 'fake-uuid')
+
+ def test_get_by_instance_legacy(self):
+ fakesysmeta = {
+ 'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
+ 'new_pci_requests': jsonutils.dumps([fake_legacy_pci_requests[1]]),
+ }
+ instance = objects.Instance(uuid='fake-uuid',
+ system_metadata=fakesysmeta)
+ requests = objects.InstancePCIRequests.get_by_instance(self.context,
+ instance)
+ self.assertEqual(2, len(requests.requests))
+ self.assertEqual('alias_1', requests.requests[0].alias_name)
+ self.assertFalse(requests.requests[0].is_new)
+ self.assertEqual('alias_2', requests.requests[1].alias_name)
+ self.assertTrue(requests.requests[1].is_new)
+
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ def test_save(self, mock_update):
+ requests = objects.InstancePCIRequests(
+ context=self.context,
+ instance_uuid=FAKE_UUID,
+ requests=[objects.InstancePCIRequest(
+ count=1,
+ spec=[{'foo': 'bar'}, {'baz': 'bat'}],
+ alias_name='alias_1',
+ is_new=False,
+ request_id=FAKE_REQUEST_UUID)])
+ requests.save()
+ self.assertEqual(FAKE_UUID, mock_update.call_args_list[0][0][1])
+ self.assertEqual(
+ [{'count': 1, 'is_new': False,
+ 'alias_name': 'alias_1',
+ 'spec': [{'foo': 'bar'}, {'baz': 'bat'}],
+ 'request_id': FAKE_REQUEST_UUID}],
+ jsonutils.loads(
+ mock_update.call_args_list[0][0][2]['pci_requests']))
+
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_save_and_reload(self, mock_get, mock_update):
+ database = {}
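+        # Stand in for the instance_extra table with an in-memory dict so
+        # that save() and get_by_instance_uuid() round-trip the same data.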
+
+ def _save(context, uuid, values):
+ database.setdefault(uuid, {'instance_uuid': uuid})
+ database[uuid].update(values)
+
+ def _get(context, uuid, columns):
+ return database.get(uuid, {})
+
+ mock_update.side_effect = _save
+ mock_get.side_effect = _get
+
+ requests = objects.InstancePCIRequests(
+ context=self.context,
+ instance_uuid=FAKE_UUID,
+ requests=[objects.InstancePCIRequest(
+ count=1, is_new=False, alias_name='alias_1',
+ spec=[{'foo': 'bar'}])])
+ requests.save()
+ _requests = objects.InstancePCIRequests.get_by_instance_uuid(
+ self.context, FAKE_UUID)
+
+ self.assertEqual(requests.instance_uuid, _requests.instance_uuid)
+ self.assertEqual(len(requests.requests), len(_requests.requests))
+ self.assertEqual(requests.requests[0].alias_name,
+ _requests.requests[0].alias_name)
+
+ def test_new_compatibility(self):
+ request = objects.InstancePCIRequest(is_new=False)
+ self.assertFalse(request.new)
+
+ def test_backport_1_0(self):
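+        # Backporting to version 1.0 should drop request_id, which is not
+        # part of that version of InstancePCIRequest.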
+ requests = objects.InstancePCIRequests(
+ requests=[objects.InstancePCIRequest(count=1,
+ request_id=FAKE_UUID),
+ objects.InstancePCIRequest(count=2,
+ request_id=FAKE_UUID)])
+ primitive = requests.obj_to_primitive(target_version='1.0')
+ backported = objects.InstancePCIRequests.obj_from_primitive(
+ primitive)
+ self.assertEqual('1.0', backported.VERSION)
+ self.assertEqual(2, len(backported.requests))
+ self.assertFalse(backported.requests[0].obj_attr_is_set('request_id'))
+ self.assertFalse(backported.requests[1].obj_attr_is_set('request_id'))
+
+
+class TestInstancePCIRequests(test_objects._LocalTest,
+ _TestInstancePCIRequests):
+ pass
+
+
+class TestRemoteInstancePCIRequests(test_objects._RemoteTest,
+ _TestInstancePCIRequests):
+ pass
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
new file mode 100644
index 0000000000..da0d52831d
--- /dev/null
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -0,0 +1,109 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import keypair
+from nova.tests.unit.objects import test_objects
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_keypair = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'name': 'foo-keypair',
+ 'user_id': 'fake-user',
+ 'fingerprint': 'fake-fingerprint',
+ 'public_key': 'fake\npublic\nkey',
+ }
+
+
+class _TestKeyPairObject(object):
+ def test_get_by_name(self):
+ self.mox.StubOutWithMock(db, 'key_pair_get')
+ db.key_pair_get(self.context, 'fake-user', 'foo-keypair').AndReturn(
+ fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair.get_by_name(self.context, 'fake-user',
+ 'foo-keypair')
+ self.compare_obj(keypair_obj, fake_keypair)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'key_pair_create')
+ db.key_pair_create(self.context,
+ {'name': 'foo-keypair',
+ 'public_key': 'keydata'}).AndReturn(fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.public_key = 'keydata'
+ keypair_obj.create(self.context)
+ self.compare_obj(keypair_obj, fake_keypair)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'key_pair_create')
+ db.key_pair_create(self.context,
+ {'name': 'foo-keypair',
+ 'public_key': 'keydata'}).AndReturn(fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.public_key = 'keydata'
+ keypair_obj.create(self.context)
+ self.assertRaises(exception.ObjectActionError, keypair_obj.create,
+ self.context)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'key_pair_destroy')
+ db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.id = 123
+ keypair_obj.user_id = 'fake-user'
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.destroy(self.context)
+
+ def test_destroy_by_name(self):
+ self.mox.StubOutWithMock(db, 'key_pair_destroy')
+ db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
+ self.mox.ReplayAll()
+ keypair.KeyPair.destroy_by_name(self.context, 'fake-user',
+ 'foo-keypair')
+
+ def test_get_by_user(self):
+ self.mox.StubOutWithMock(db, 'key_pair_get_all_by_user')
+ self.mox.StubOutWithMock(db, 'key_pair_count_by_user')
+ db.key_pair_get_all_by_user(self.context, 'fake-user').AndReturn(
+ [fake_keypair])
+ db.key_pair_count_by_user(self.context, 'fake-user').AndReturn(1)
+ self.mox.ReplayAll()
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user')
+ self.assertEqual(1, len(keypairs))
+ self.compare_obj(keypairs[0], fake_keypair)
+ self.assertEqual(1, keypair.KeyPairList.get_count_by_user(self.context,
+ 'fake-user'))
+
+
+class TestKeyPairObject(test_objects._LocalTest,
+                        _TestKeyPairObject):
+ pass
+
+
+class TestRemoteKeyPairObject(test_objects._RemoteTest,
+                              _TestKeyPairObject):
+ pass
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
new file mode 100644
index 0000000000..eeb57db618
--- /dev/null
+++ b/nova/tests/unit/objects/test_migration.py
@@ -0,0 +1,184 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import exception
+from nova.objects import migration
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+
+
+def fake_db_migration(**updates):
+ db_instance = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'source_compute': 'compute-source',
+ 'dest_compute': 'compute-dest',
+ 'source_node': 'node-source',
+ 'dest_node': 'node-dest',
+ 'dest_host': 'host-dest',
+ 'old_instance_type_id': 42,
+ 'new_instance_type_id': 84,
+ 'instance_uuid': 'fake-uuid',
+ 'status': 'migrating',
+ }
+
+ if updates:
+ db_instance.update(updates)
+ return db_instance
+
+
+class _TestMigrationObject(object):
+ def test_get_by_id(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_get')
+ db.migration_get(ctxt, fake_migration['id']).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
+ self.compare_obj(mig, fake_migration)
+
+ def test_get_by_instance_and_status(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
+ db.migration_get_by_instance_and_status(ctxt,
+ fake_migration['id'],
+ 'migrating'
+ ).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration.get_by_instance_and_status(
+ ctxt, fake_migration['id'], 'migrating')
+ self.compare_obj(mig, fake_migration)
+
+ def test_create(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
+ fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.source_compute = 'foo'
+ mig.create(ctxt)
+ self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
+
+ def test_recreate_fails(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
+ fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.source_compute = 'foo'
+ mig.create(ctxt)
+ self.assertRaises(exception.ObjectActionError, mig.create,
+ self.context)
+
+ def test_save(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_update')
+ db.migration_update(ctxt, 123, {'source_compute': 'foo'}
+ ).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.id = 123
+ mig.source_compute = 'foo'
+ mig.save(ctxt)
+ self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
+
+ def test_instance(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ fake_inst = fake_instance.fake_db_instance()
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ mig = migration.Migration._from_db_object(ctxt,
+ migration.Migration(),
+ fake_migration)
+ mig._context = ctxt
+ self.mox.ReplayAll()
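+        # Accessing mig.instance should lazy-load the instance from the DB.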
+ self.assertEqual(mig.instance.host, fake_inst['host'])
+
+ def test_get_unconfirmed_by_dest_compute(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_unconfirmed_by_dest_compute')
+ db.migration_get_unconfirmed_by_dest_compute(
+ ctxt, 'window', 'foo',
+ use_slave=False).AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = (
+ migration.MigrationList.get_unconfirmed_by_dest_compute(
+ ctxt, 'window', 'foo', use_slave=False))
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+ def test_get_in_progress_by_host_and_node(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_in_progress_by_host_and_node')
+ db.migration_get_in_progress_by_host_and_node(
+ ctxt, 'host', 'node').AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = (
+ migration.MigrationList.get_in_progress_by_host_and_node(
+ ctxt, 'host', 'node'))
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+ def test_get_by_filters(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_all_by_filters')
+ filters = {'foo': 'bar'}
+ db.migration_get_all_by_filters(ctxt, filters).AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = migration.MigrationList.get_by_filters(ctxt, filters)
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+
+class TestMigrationObject(test_objects._LocalTest,
+ _TestMigrationObject):
+ pass
+
+
+class TestRemoteMigrationObject(test_objects._RemoteTest,
+ _TestMigrationObject):
+ pass
diff --git a/nova/tests/unit/objects/test_network.py b/nova/tests/unit/objects/test_network.py
new file mode 100644
index 0000000000..0ba6ed06e9
--- /dev/null
+++ b/nova/tests/unit/objects/test_network.py
@@ -0,0 +1,232 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import netaddr
+
+from nova.objects import network as network_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_network = {
+ 'deleted': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'id': 1,
+ 'label': 'Fake Network',
+ 'injected': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '1234::/64',
+ 'multi_host': False,
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'netmask_v6': 64,
+ 'gateway_v6': '1234::1',
+ 'bridge': 'br100',
+ 'bridge_interface': 'eth0',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': None,
+ 'vpn_public_address': None,
+ 'vpn_public_port': None,
+ 'vpn_private_address': None,
+ 'dhcp_start': '192.168.1.10',
+ 'rxtx_base': None,
+ 'project_id': None,
+ 'priority': None,
+ 'host': None,
+ 'uuid': 'fake-uuid',
+ 'mtu': None,
+ 'dhcp_server': '192.168.1.1',
+ 'enable_dhcp': True,
+ 'share_address': False,
+}
+
+
+class _TestNetworkObject(object):
+ def _compare(self, obj, db_obj):
+ for field in obj.fields:
+ db_val = db_obj[field]
+ obj_val = obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ if isinstance(obj_val, netaddr.IPNetwork):
+ obj_val = str(obj_val)
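+            # netmask_v6 is stored in the DB as a prefix length, so expand
+            # it to the netmask string form used by the object field.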
+ if field == 'netmask_v6':
+ db_val = str(netaddr.IPNetwork('1::/%i' % db_val).netmask)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.network_get')
+ def test_get_by_id(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_id(self.context, 'foo')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, 'foo',
+ project_only='allow_none')
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_by_uuid(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_uuid(self.context, 'foo')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, 'foo')
+
+ @mock.patch('nova.db.network_get_by_cidr')
+ def test_get_by_cidr(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_cidr(self.context,
+ '192.168.1.0/24')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, '192.168.1.0/24')
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ def test_save(self, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
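+        # With no pending changes the first save() should not hit the DB;
+        # only the label change below triggers network_update.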
+ network.save()
+ network.label = 'bar'
+ update.return_value = result
+ network.save()
+ update.assert_called_once_with(self.context, network.id,
+ {'label': 'bar'})
+ self.assertFalse(set_host.called)
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ @mock.patch('nova.db.network_get')
+ def test_save_with_host(self, get, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
+ network.host = 'foo'
+ get.return_value = result
+ network.save()
+ set_host.assert_called_once_with(self.context, network.id, 'foo')
+ self.assertFalse(update.called)
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ def test_save_with_host_and_other(self, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
+ network.host = 'foo'
+ network.label = 'bar'
+ update.return_value = result
+ network.save()
+ set_host.assert_called_once_with(self.context, network.id, 'foo')
+ update.assert_called_once_with(self.context, network.id,
+ {'label': 'bar'})
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_associate')
+ def test_associate(self, associate):
+ network_obj.Network.associate(self.context, 'project',
+ network_id=123)
+ associate.assert_called_once_with(self.context, 'project',
+ network_id=123, force=False)
+
+ @mock.patch('nova.db.network_disassociate')
+ def test_disassociate(self, disassociate):
+ network_obj.Network.disassociate(self.context, 123,
+ host=True, project=True)
+ disassociate.assert_called_once_with(self.context, 123, True, True)
+
+ @mock.patch('nova.db.network_create_safe')
+ def test_create(self, create):
+ create.return_value = fake_network
+ network = network_obj.Network(context=self.context, label='foo')
+ network.create()
+ create.assert_called_once_with(self.context, {'label': 'foo'})
+ self._compare(network, fake_network)
+
+ @mock.patch('nova.db.network_delete_safe')
+ def test_destroy(self, delete):
+ network = network_obj.Network(context=self.context, id=123)
+ network.destroy()
+ delete.assert_called_once_with(self.context, 123)
+ self.assertTrue(network.deleted)
+ self.assertNotIn('deleted', network.obj_what_changed())
+
+ @mock.patch('nova.db.network_get_all')
+ def test_get_all(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_all(self.context)
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, 'allow_none')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_get_all_by_uuids')
+ def test_get_all_by_uuids(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_uuids(self.context,
+ ['foo'])
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, ['foo'], 'allow_none')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_get_all_by_host')
+ def test_get_all_by_host(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_host(self.context, 'host')
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, 'host')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_in_use_on_host')
+ def test_in_use_on_host(self, in_use):
+ in_use.return_value = True
+ self.assertTrue(network_obj.Network.in_use_on_host(self.context,
+ 123, 'foo'))
+ in_use.assert_called_once_with(self.context, 123, 'foo')
+
+ @mock.patch('nova.db.project_get_networks')
+ def test_get_all_by_project(self, get_nets):
+ get_nets.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_project(self.context, 123)
+ self.assertEqual(1, len(networks))
+ get_nets.assert_called_once_with(self.context, 123, associate=True)
+ self._compare(networks[0], fake_network)
+
+ def test_compat_version_1_1(self):
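+        # Fields added after version 1.1 (mtu, dhcp_server, enable_dhcp and
+        # share_address) should be stripped from a 1.1 primitive.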
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ primitive = network.obj_to_primitive(target_version='1.1')
+ self.assertNotIn('mtu', primitive)
+ self.assertNotIn('enable_dhcp', primitive)
+ self.assertNotIn('dhcp_server', primitive)
+ self.assertNotIn('share_address', primitive)
+
+
+class TestNetworkObject(test_objects._LocalTest,
+ _TestNetworkObject):
+ pass
+
+
+class TestRemoteNetworkObject(test_objects._RemoteTest,
+ _TestNetworkObject):
+ pass
diff --git a/nova/tests/unit/objects/test_network_request.py b/nova/tests/unit/objects/test_network_request.py
new file mode 100644
index 0000000000..bbe6010226
--- /dev/null
+++ b/nova/tests/unit/objects/test_network_request.py
@@ -0,0 +1,102 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+FAKE_UUID = '0C5C9AD2-F967-4E92-A7F3-24410F697440'
+
+
+class _TestNetworkRequestObject(object):
+ def test_basic(self):
+ request = objects.NetworkRequest()
+ request.network_id = '456'
+ request.address = '1.2.3.4'
+ request.port_id = FAKE_UUID
+
+ def test_load(self):
+ request = objects.NetworkRequest()
+ self.assertIsNone(request.port_id)
+
+ def test_to_tuple_neutron(self):
+ request = objects.NetworkRequest(network_id='123',
+ address='1.2.3.4',
+ port_id=FAKE_UUID,
+ )
+ with mock.patch('nova.utils.is_neutron', return_value=True):
+ self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
+ request.to_tuple())
+
+ def test_to_tuple_nova(self):
+ request = objects.NetworkRequest(network_id='123',
+ address='1.2.3.4',
+ port_id=FAKE_UUID)
+ with mock.patch('nova.utils.is_neutron', return_value=False):
+ self.assertEqual(('123', '1.2.3.4'),
+ request.to_tuple())
+
+ def test_from_tuple_neutron(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4', FAKE_UUID, None))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertEqual(FAKE_UUID, request.port_id)
+
+ def test_from_tuple_neutron_without_pci_request_id(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4', FAKE_UUID))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertEqual(FAKE_UUID, request.port_id)
+
+ def test_from_tuple_nova(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4'))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertIsNone(request.port_id)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_list_as_tuples(self, is_neutron):
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='123'),
+ objects.NetworkRequest(network_id='456')])
+ self.assertEqual(
+ [('123', None, None, None), ('456', None, None, None)],
+ requests.as_tuples())
+
+ def test_is_single_unspecified(self):
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='123')])
+ self.assertFalse(requests.is_single_unspecified)
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(),
+ objects.NetworkRequest()])
+ self.assertFalse(requests.is_single_unspecified)
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest()])
+ self.assertTrue(requests.is_single_unspecified)
+
+
+class TestNetworkRequestObject(test_objects._LocalTest,
+ _TestNetworkRequestObject):
+ pass
+
+
+class TestNetworkRequestRemoteObject(test_objects._RemoteTest,
+ _TestNetworkRequestObject):
+ pass
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
new file mode 100644
index 0000000000..f7eb53808b
--- /dev/null
+++ b/nova/tests/unit/objects/test_objects.py
@@ -0,0 +1,1126 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import datetime
+import hashlib
+import inspect
+import os
+import pprint
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+from testtools import matchers
+
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base
+from nova.objects import fields
+from nova.openstack.common import log
+from nova import rpc
+from nova import test
+from nova.tests.unit import fake_notifier
+from nova import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
+ VERSION = '1.0'
+ fields = {'baz': fields.Field(fields.Integer())}
+
+
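+# A test double that exercises the base NovaObject machinery: remotable
+# methods, change tracking and version compatibility.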
+class MyObj(base.NovaPersistentObject, base.NovaObject):
+ VERSION = '1.6'
+ fields = {'foo': fields.Field(fields.Integer()),
+ 'bar': fields.Field(fields.String()),
+ 'missing': fields.Field(fields.String()),
+ 'readonly': fields.Field(fields.Integer(), read_only=True),
+ 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True)
+ }
+
+ @staticmethod
+ def _from_db_object(context, obj, db_obj):
+ self = MyObj()
+ self.foo = db_obj['foo']
+ self.bar = db_obj['bar']
+ self.missing = db_obj['missing']
+ self.readonly = 1
+ return self
+
+ def obj_load_attr(self, attrname):
+ setattr(self, attrname, 'loaded!')
+
+ @base.remotable_classmethod
+ def query(cls, context):
+ obj = cls(foo=1, bar='bar')
+ obj.obj_reset_changes()
+ return obj
+
+ @base.remotable
+ def marco(self, context):
+ return 'polo'
+
+ @base.remotable
+ def _update_test(self, context):
+ if context.project_id == 'alternate':
+ self.bar = 'alternate-context'
+ else:
+ self.bar = 'updated'
+
+ @base.remotable
+ def save(self, context):
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context):
+ self.foo = 321
+ self.bar = 'refreshed'
+ self.obj_reset_changes()
+
+ @base.remotable
+ def modify_save_modify(self, context):
+ self.bar = 'meow'
+ self.save()
+ self.foo = 42
+ self.rel_object = MyOwnedObject(baz=42)
+
+ def obj_make_compatible(self, primitive, target_version):
+ # NOTE(danms): Simulate an older version that had a different
+ # format for the 'bar' attribute
+ if target_version == '1.1' and 'bar' in primitive:
+ primitive['bar'] = 'old%s' % primitive['bar']
+
+
+class MyObjDiffVers(MyObj):
+ VERSION = '1.5'
+
+ @classmethod
+ def obj_name(cls):
+ return 'MyObj'
+
+
+class MyObj2(object):
+ @classmethod
+ def obj_name(cls):
+ return 'MyObj'
+
+ @base.remotable_classmethod
+ def query(cls, *args, **kwargs):
+ pass
+
+
+class RandomMixInWithNoFields(object):
+ """Used to test object inheritance using a mixin that has no fields."""
+ pass
+
+
+class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
+ fields = {'new_field': fields.Field(fields.String())}
+
+
+class TestMetaclass(test.TestCase):
+ def test_obj_tracking(self):
+
+ @six.add_metaclass(base.NovaObjectMetaclass)
+ class NewBaseClass(object):
+ VERSION = '1.0'
+ fields = {}
+
+ @classmethod
+ def obj_name(cls):
+ return cls.__name__
+
+ class Fake1TestObj1(NewBaseClass):
+ @classmethod
+ def obj_name(cls):
+ return 'fake1'
+
+ class Fake1TestObj2(Fake1TestObj1):
+ pass
+
+ class Fake1TestObj3(Fake1TestObj1):
+ VERSION = '1.1'
+
+ class Fake2TestObj1(NewBaseClass):
+ @classmethod
+ def obj_name(cls):
+ return 'fake2'
+
+ class Fake1TestObj4(Fake1TestObj3):
+ VERSION = '1.2'
+
+ class Fake2TestObj2(Fake2TestObj1):
+ VERSION = '1.1'
+
+ class Fake1TestObj5(Fake1TestObj1):
+ VERSION = '1.1'
+
+ # Newest versions first in the list. Duplicate versions take the
+ # newest object.
+ expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
+ 'fake2': [Fake2TestObj2, Fake2TestObj1]}
+ self.assertEqual(expected, NewBaseClass._obj_classes)
+ # The following should work, also.
+ self.assertEqual(expected, Fake1TestObj1._obj_classes)
+ self.assertEqual(expected, Fake1TestObj2._obj_classes)
+ self.assertEqual(expected, Fake1TestObj3._obj_classes)
+ self.assertEqual(expected, Fake1TestObj4._obj_classes)
+ self.assertEqual(expected, Fake1TestObj5._obj_classes)
+ self.assertEqual(expected, Fake2TestObj1._obj_classes)
+ self.assertEqual(expected, Fake2TestObj2._obj_classes)
+
+ def test_field_checking(self):
+ def create_class(field):
+ class TestField(base.NovaObject):
+ VERSION = '1.5'
+ fields = {'foo': field()}
+ return TestField
+
+ create_class(fields.IPV4AndV6AddressField)
+ self.assertRaises(exception.ObjectFieldInvalid,
+ create_class, fields.IPV4AndV6Address)
+ self.assertRaises(exception.ObjectFieldInvalid,
+ create_class, int)
+
+
+class TestObjToPrimitive(test.TestCase):
+
+ def test_obj_to_primitive_list(self):
+ class MyObjElement(base.NovaObject):
+ fields = {'foo': fields.IntegerField()}
+
+ def __init__(self, foo):
+ super(MyObjElement, self).__init__()
+ self.foo = foo
+
+ class MyList(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
+
+ mylist = MyList()
+ mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
+ self.assertEqual([1, 2, 3],
+ [x['foo'] for x in base.obj_to_primitive(mylist)])
+
+ def test_obj_to_primitive_dict(self):
+ myobj = MyObj(foo=1, bar='foo')
+ self.assertEqual({'foo': 1, 'bar': 'foo'},
+ base.obj_to_primitive(myobj))
+
+ def test_obj_to_primitive_recursive(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyObj')}
+
+ mylist = MyList(objects=[MyObj(), MyObj()])
+ for i, value in enumerate(mylist):
+ value.foo = i
+ self.assertEqual([{'foo': 0}, {'foo': 1}],
+ base.obj_to_primitive(mylist))
+
+ def test_obj_to_primitive_with_ip_addr(self):
+ class TestObject(base.NovaObject):
+ fields = {'addr': fields.IPAddressField(),
+ 'cidr': fields.IPNetworkField()}
+
+ obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
+ self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
+ base.obj_to_primitive(obj))
+
+
+class TestObjMakeList(test.TestCase):
+
+ def test_obj_make_list(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ pass
+
+ db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
+ {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
+ ]
+ mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
+ self.assertEqual(2, len(mylist))
+ self.assertEqual('ctxt', mylist._context)
+ for index, item in enumerate(mylist):
+ self.assertEqual(db_objs[index]['foo'], item.foo)
+ self.assertEqual(db_objs[index]['bar'], item.bar)
+ self.assertEqual(db_objs[index]['missing'], item.missing)
+
+
+def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
+ comparators=None):
+ """Compare a NovaObject and a dict-like database object.
+
+ This automatically converts TZ-aware datetimes and iterates over
+ the fields of the object.
+
+    :param test: The TestCase doing the comparison
+    :param obj: The NovaObject to examine
+    :param db_obj: The dict-like database object to use as reference
+    :param subs: A dict of objkey=dbkey field substitutions
+    :param allow_missing: A list of fields that may not be in db_obj
+    :param comparators: Map of comparator functions to use for certain fields
+ """
+
+ if subs is None:
+ subs = {}
+ if allow_missing is None:
+ allow_missing = []
+ if comparators is None:
+ comparators = {}
+
+ for key in obj.fields:
+ if key in allow_missing and not obj.obj_attr_is_set(key):
+ continue
+ obj_val = obj[key]
+ db_key = subs.get(key, key)
+ db_val = db_obj[db_key]
+ if isinstance(obj_val, datetime.datetime):
+ obj_val = obj_val.replace(tzinfo=None)
+
+ if key in comparators:
+ comparator = comparators[key]
+ comparator(db_val, obj_val)
+ else:
+ test.assertEqual(db_val, obj_val)
+
+
+class _BaseTestCase(test.TestCase):
+ def setUp(self):
+ super(_BaseTestCase, self).setUp()
+ self.remote_object_calls = list()
+ self.user_id = 'fake-user'
+ self.project_id = 'fake-project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
+ comparators=None):
+ compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
+ comparators=comparators)
+
+ def json_comparator(self, expected, obj_val):
+ # json-ify an object field for comparison with its db str
+ # equivalent
+ self.assertEqual(expected, jsonutils.dumps(obj_val))
+
+ def str_comparator(self, expected, obj_val):
+ """Compare an object field to a string in the db by performing
+ a simple coercion on the object field value.
+ """
+ self.assertEqual(expected, str(obj_val))
+
+ def assertNotIsInstance(self, obj, cls, msg=None):
+        """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)'."""
+ try:
+ f = super(_BaseTestCase, self).assertNotIsInstance
+ except AttributeError:
+ self.assertThat(obj,
+ matchers.Not(matchers.IsInstance(cls)),
+ message=msg or '')
+ else:
+ f(obj, cls, msg=msg)
+
+
+class _LocalTest(_BaseTestCase):
+ def setUp(self):
+ super(_LocalTest, self).setUp()
+ # Just in case
+ base.NovaObject.indirection_api = None
+
+ def assertRemotes(self):
+ self.assertEqual(self.remote_object_calls, [])
+
+
+@contextlib.contextmanager
+def things_temporarily_local():
+ # Temporarily go non-remote so the conductor handles
+ # this request directly
+ _api = base.NovaObject.indirection_api
+ base.NovaObject.indirection_api = None
+ yield
+ base.NovaObject.indirection_api = _api
+
+
+class _RemoteTest(_BaseTestCase):
+ def _testable_conductor(self):
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.remote_object_calls = list()
+
+ orig_object_class_action = \
+ self.conductor_service.manager.object_class_action
+ orig_object_action = \
+ self.conductor_service.manager.object_action
+
+ def fake_object_class_action(*args, **kwargs):
+ self.remote_object_calls.append((kwargs.get('objname'),
+ kwargs.get('objmethod')))
+ with things_temporarily_local():
+ result = orig_object_class_action(*args, **kwargs)
+ return (base.NovaObject.obj_from_primitive(result, context=args[0])
+ if isinstance(result, base.NovaObject) else result)
+ self.stubs.Set(self.conductor_service.manager, 'object_class_action',
+ fake_object_class_action)
+
+ def fake_object_action(*args, **kwargs):
+ self.remote_object_calls.append((kwargs.get('objinst'),
+ kwargs.get('objmethod')))
+ with things_temporarily_local():
+ result = orig_object_action(*args, **kwargs)
+ return result
+ self.stubs.Set(self.conductor_service.manager, 'object_action',
+ fake_object_action)
+
+ # Things are remoted by default in this session
+ base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
+
+ # To make sure local and remote contexts match
+ self.stubs.Set(rpc.RequestContextSerializer,
+ 'serialize_context',
+ lambda s, c: c)
+ self.stubs.Set(rpc.RequestContextSerializer,
+ 'deserialize_context',
+ lambda s, c: c)
+
+ def setUp(self):
+ super(_RemoteTest, self).setUp()
+ self._testable_conductor()
+
+ def assertRemotes(self):
+ self.assertNotEqual(self.remote_object_calls, [])
+
+
+class _TestObject(object):
+ def test_object_attrs_in_init(self):
+ # Spot check a few
+ objects.Instance
+ objects.InstanceInfoCache
+ objects.SecurityGroup
+ # Now check the test one in this file. Should be newest version
+ self.assertEqual('1.6', objects.MyObj.VERSION)
+
+ def test_hydration_type_error(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 'a'}}
+ self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
+
+ def test_hydration(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 1}}
+ real_method = MyObj._obj_from_primitive
+
+ def _obj_from_primitive(*args):
+ return real_method(*args)
+
+ with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
+ ofp.side_effect = _obj_from_primitive
+ obj = MyObj.obj_from_primitive(primitive)
+ ofp.assert_called_once_with(None, '1.5', primitive)
+ self.assertEqual(obj.foo, 1)
+
+ def test_hydration_version_different(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.2',
+ 'nova_object.data': {'foo': 1}}
+ obj = MyObj.obj_from_primitive(primitive)
+ self.assertEqual(obj.foo, 1)
+ self.assertEqual('1.2', obj.VERSION)
+
+ def test_hydration_bad_ns(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'foo',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 1}}
+ self.assertRaises(exception.UnsupportedObjectError,
+ MyObj.obj_from_primitive, primitive)
+
+ def test_dehydration(self):
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.data': {'foo': 1}}
+ obj = MyObj(foo=1)
+ obj.obj_reset_changes()
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_object_property(self):
+ obj = MyObj(foo=1)
+ self.assertEqual(obj.foo, 1)
+
+ def test_object_property_type_error(self):
+ obj = MyObj()
+
+ def fail():
+ obj.foo = 'a'
+ self.assertRaises(ValueError, fail)
+
+ def test_object_dict_syntax(self):
+ obj = MyObj(foo=123, bar='bar')
+ self.assertEqual(obj['foo'], 123)
+ self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
+ [('bar', 'bar'), ('foo', 123)])
+ self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
+ [('bar', 'bar'), ('foo', 123)])
+
+ def test_load(self):
+ obj = MyObj()
+ self.assertEqual(obj.bar, 'loaded!')
+
+ def test_load_in_base(self):
+ class Foo(base.NovaObject):
+ fields = {'foobar': fields.Field(fields.Integer())}
+ obj = Foo()
+ with self.assertRaisesRegexp(NotImplementedError, ".*foobar.*"):
+ obj.foobar
+
+ def test_loaded_in_primitive(self):
+ obj = MyObj(foo=1)
+ obj.obj_reset_changes()
+ self.assertEqual(obj.bar, 'loaded!')
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.changes': ['bar'],
+ 'nova_object.data': {'foo': 1,
+ 'bar': 'loaded!'}}
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_changes_in_primitive(self):
+ obj = MyObj(foo=123)
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ primitive = obj.obj_to_primitive()
+ self.assertIn('nova_object.changes', primitive)
+ obj2 = MyObj.obj_from_primitive(primitive)
+ self.assertEqual(obj2.obj_what_changed(), set(['foo']))
+ obj2.obj_reset_changes()
+ self.assertEqual(obj2.obj_what_changed(), set())
+
+ def test_obj_class_from_name(self):
+ obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
+ self.assertEqual('1.5', obj.VERSION)
+
+ def test_obj_class_from_name_latest_compatible(self):
+ obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
+ self.assertEqual('1.6', obj.VERSION)
+
+ def test_unknown_objtype(self):
+ self.assertRaises(exception.UnsupportedObjectError,
+ base.NovaObject.obj_class_from_name, 'foo', '1.0')
+
+ def test_obj_class_from_name_supported_version(self):
+ error = None
+ try:
+ base.NovaObject.obj_class_from_name('MyObj', '1.25')
+ except exception.IncompatibleObjectVersion as error:
+ pass
+
+ self.assertIsNotNone(error)
+ self.assertEqual('1.6', error.kwargs['supported'])
+
+ def test_with_alternate_context(self):
+ ctxt1 = context.RequestContext('foo', 'foo')
+ ctxt2 = context.RequestContext('bar', 'alternate')
+ obj = MyObj.query(ctxt1)
+ obj._update_test(ctxt2)
+ self.assertEqual(obj.bar, 'alternate-context')
+ self.assertRemotes()
+
+ def test_orphaned_object(self):
+ obj = MyObj.query(self.context)
+ obj._context = None
+ self.assertRaises(exception.OrphanedObjectError,
+ obj._update_test)
+ self.assertRemotes()
+
+ def test_changed_1(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj._update_test(self.context)
+ self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
+ self.assertEqual(obj.foo, 123)
+ self.assertRemotes()
+
+ def test_changed_2(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj.save(self.context)
+ self.assertEqual(obj.obj_what_changed(), set([]))
+ self.assertEqual(obj.foo, 123)
+ self.assertRemotes()
+
+ def test_changed_3(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj.refresh(self.context)
+ self.assertEqual(obj.obj_what_changed(), set([]))
+ self.assertEqual(obj.foo, 321)
+ self.assertEqual(obj.bar, 'refreshed')
+ self.assertRemotes()
+
+ def test_changed_4(self):
+ obj = MyObj.query(self.context)
+ obj.bar = 'something'
+ self.assertEqual(obj.obj_what_changed(), set(['bar']))
+ obj.modify_save_modify(self.context)
+ self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
+ self.assertEqual(obj.foo, 42)
+ self.assertEqual(obj.bar, 'meow')
+ self.assertIsInstance(obj.rel_object, MyOwnedObject)
+ self.assertRemotes()
+
+ def test_changed_with_sub_object(self):
+ class ParentObject(base.NovaObject):
+ fields = {'foo': fields.IntegerField(),
+ 'bar': fields.ObjectField('MyObj'),
+ }
+ obj = ParentObject()
+ self.assertEqual(set(), obj.obj_what_changed())
+ obj.foo = 1
+ self.assertEqual(set(['foo']), obj.obj_what_changed())
+ bar = MyObj()
+ obj.bar = bar
+ self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
+ obj.obj_reset_changes()
+ self.assertEqual(set(), obj.obj_what_changed())
+ bar.foo = 1
+ self.assertEqual(set(['bar']), obj.obj_what_changed())
+
+ def test_static_result(self):
+ obj = MyObj.query(self.context)
+ self.assertEqual(obj.bar, 'bar')
+ result = obj.marco()
+ self.assertEqual(result, 'polo')
+ self.assertRemotes()
+
+ def test_updates(self):
+ obj = MyObj.query(self.context)
+ self.assertEqual(obj.foo, 1)
+ obj._update_test()
+ self.assertEqual(obj.bar, 'updated')
+ self.assertRemotes()
+
+ def test_base_attributes(self):
+ dt = datetime.datetime(1955, 11, 5)
+ obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
+ deleted=False)
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.changes':
+ ['deleted', 'created_at', 'deleted_at', 'updated_at'],
+ 'nova_object.data':
+ {'created_at': timeutils.isotime(dt),
+ 'updated_at': timeutils.isotime(dt),
+ 'deleted_at': None,
+ 'deleted': False,
+ }
+ }
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_contains(self):
+ obj = MyObj()
+ self.assertNotIn('foo', obj)
+ obj.foo = 1
+ self.assertIn('foo', obj)
+ self.assertNotIn('does_not_exist', obj)
+
+ def test_obj_attr_is_set(self):
+ obj = MyObj(foo=1)
+ self.assertTrue(obj.obj_attr_is_set('foo'))
+ self.assertFalse(obj.obj_attr_is_set('bar'))
+ self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
+
+ def test_get(self):
+ obj = MyObj(foo=1)
+ # Foo has value, should not get the default
+ self.assertEqual(obj.get('foo', 2), 1)
+ # Foo has value, should return the value without error
+ self.assertEqual(obj.get('foo'), 1)
+ # Bar is not loaded, so we should get the default
+ self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
+ # Bar without a default should lazy-load
+ self.assertEqual(obj.get('bar'), 'loaded!')
+ # Bar now has a default, but loaded value should be returned
+ self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
+ # Invalid attribute should raise AttributeError
+ self.assertRaises(AttributeError, obj.get, 'nothing')
+ # ...even with a default
+ self.assertRaises(AttributeError, obj.get, 'nothing', 3)
+
+ def test_object_inheritance(self):
+ base_fields = base.NovaPersistentObject.fields.keys()
+ myobj_fields = ['foo', 'bar', 'missing',
+ 'readonly', 'rel_object'] + base_fields
+ myobj3_fields = ['new_field']
+ self.assertTrue(issubclass(TestSubclassedObject, MyObj))
+ self.assertEqual(len(myobj_fields), len(MyObj.fields))
+ self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
+ self.assertEqual(len(myobj_fields) + len(myobj3_fields),
+ len(TestSubclassedObject.fields))
+ self.assertEqual(set(myobj_fields) | set(myobj3_fields),
+ set(TestSubclassedObject.fields.keys()))
+
+ def test_get_changes(self):
+ obj = MyObj()
+ self.assertEqual({}, obj.obj_get_changes())
+ obj.foo = 123
+ self.assertEqual({'foo': 123}, obj.obj_get_changes())
+ obj.bar = 'test'
+ self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
+ obj.obj_reset_changes()
+ self.assertEqual({}, obj.obj_get_changes())
+
+ def test_obj_fields(self):
+ class TestObj(base.NovaObject):
+ fields = {'foo': fields.Field(fields.Integer())}
+ obj_extra_fields = ['bar']
+
+ @property
+ def bar(self):
+ return 'this is bar'
+
+ obj = TestObj()
+ self.assertEqual(['foo', 'bar'], obj.obj_fields)
+
+ def test_obj_constructor(self):
+ obj = MyObj(context=self.context, foo=123, bar='abc')
+ self.assertEqual(123, obj.foo)
+ self.assertEqual('abc', obj.bar)
+ self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
+
+ def test_obj_read_only(self):
+ obj = MyObj(context=self.context, foo=123, bar='abc')
+ obj.readonly = 1
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ obj, 'readonly', 2)
+
+ def test_obj_repr(self):
+ obj = MyObj(foo=123)
+ self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
+ 'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,'
+ 'rel_object=<?>,updated_at=<?>)', repr(obj))
+
+
+class TestObject(_LocalTest, _TestObject):
+ pass
+
+
+class TestRemoteObject(_RemoteTest, _TestObject):
+ def test_major_version_mismatch(self):
+ MyObj2.VERSION = '2.0'
+ self.assertRaises(exception.IncompatibleObjectVersion,
+ MyObj2.query, self.context)
+
+ def test_minor_version_greater(self):
+ MyObj2.VERSION = '1.7'
+ self.assertRaises(exception.IncompatibleObjectVersion,
+ MyObj2.query, self.context)
+
+ def test_minor_version_less(self):
+ MyObj2.VERSION = '1.2'
+ obj = MyObj2.query(self.context)
+ self.assertEqual(obj.bar, 'bar')
+ self.assertRemotes()
+
+ def test_compat(self):
+ MyObj2.VERSION = '1.1'
+ obj = MyObj2.query(self.context)
+ self.assertEqual('oldbar', obj.bar)
+
+
+class TestObjectListBase(test.TestCase):
+ def test_list_like_operations(self):
+ class MyElement(base.NovaObject):
+ fields = {'foo': fields.IntegerField()}
+
+ def __init__(self, foo):
+ super(MyElement, self).__init__()
+ self.foo = foo
+
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyElement')}
+
+ objlist = Foo(context='foo',
+ objects=[MyElement(1), MyElement(2), MyElement(3)])
+ self.assertEqual(list(objlist), objlist.objects)
+ self.assertEqual(len(objlist), 3)
+ self.assertIn(objlist.objects[0], objlist)
+ self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
+ self.assertEqual(objlist[:1]._context, 'foo')
+ self.assertEqual(objlist[2], objlist.objects[2])
+ self.assertEqual(objlist.count(objlist.objects[0]), 1)
+ self.assertEqual(objlist.index(objlist.objects[1]), 1)
+ objlist.sort(key=lambda x: x.foo, reverse=True)
+ self.assertEqual([3, 2, 1],
+ [x.foo for x in objlist])
+
+ def test_serialization(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.Field(fields.String())}
+
+ obj = Foo(objects=[])
+ for i in 'abc':
+ bar = Bar(foo=i)
+ obj.objects.append(bar)
+
+ obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
+ self.assertFalse(obj is obj2)
+ self.assertEqual([x.foo for x in obj],
+ [y.foo for y in obj2])
+
+ def _test_object_list_version_mappings(self, list_obj_class):
+ # Figure out what sort of object this list is for
+ list_field = list_obj_class.fields['objects']
+ item_obj_field = list_field._type._element_type
+ item_obj_name = item_obj_field._type._obj_name
+
+ # Look through all object classes of this type and make sure that
+ # the versions we find are covered by the parent list class
+ for item_class in base.NovaObject._obj_classes[item_obj_name]:
+ self.assertIn(
+ item_class.VERSION,
+ list_obj_class.child_versions.values(),
+ 'Version mapping is incomplete for %s' % (
+ list_obj_class.__name__))
+
+ def test_object_version_mappings(self):
+ # Find all object list classes and make sure that they at least handle
+ # all the current object versions
+ for obj_classes in base.NovaObject._obj_classes.values():
+ for obj_class in obj_classes:
+ if issubclass(obj_class, base.ObjectListBase):
+ self._test_object_list_version_mappings(obj_class)
+
+ def test_list_changes(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.StringField()}
+
+ obj = Foo(objects=[])
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.objects.append(Bar(foo='test'))
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.obj_reset_changes()
+ # This should still look dirty because the child is dirty
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.objects[0].obj_reset_changes()
+ # This should now look clean because the child is clean
+ self.assertEqual(set(), obj.obj_what_changed())
+
+ def test_initialize_objects(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.StringField()}
+
+ obj = Foo()
+ self.assertEqual([], obj.objects)
+ self.assertEqual(set(), obj.obj_what_changed())
+
+ def test_obj_repr(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'uuid': fields.StringField()}
+
+ obj = Foo(objects=[Bar(uuid='fake-uuid')])
+ self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj))
+
+
+class TestObjectSerializer(_BaseTestCase):
+ def test_serialize_entity_primitive(self):
+ ser = base.NovaObjectSerializer()
+ for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
+ self.assertEqual(thing, ser.serialize_entity(None, thing))
+
+ def test_deserialize_entity_primitive(self):
+ ser = base.NovaObjectSerializer()
+ for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
+ self.assertEqual(thing, ser.deserialize_entity(None, thing))
+
+ def test_deserialize_entity_newer_version(self):
+ ser = base.NovaObjectSerializer()
+ ser._conductor = mock.Mock()
+ ser._conductor.object_backport.return_value = 'backported'
+ obj = MyObj()
+ obj.VERSION = '1.25'
+ primitive = obj.obj_to_primitive()
+ result = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual('backported', result)
+ ser._conductor.object_backport.assert_called_with(self.context,
+ primitive,
+ '1.6')
+
+ def test_object_serialization(self):
+ ser = base.NovaObjectSerializer()
+ obj = MyObj()
+ primitive = ser.serialize_entity(self.context, obj)
+ self.assertIn('nova_object.name', primitive)
+ obj2 = ser.deserialize_entity(self.context, primitive)
+ self.assertIsInstance(obj2, MyObj)
+ self.assertEqual(self.context, obj2._context)
+
+ def test_object_serialization_iterables(self):
+ ser = base.NovaObjectSerializer()
+ obj = MyObj()
+ for iterable in (list, tuple, set):
+ thing = iterable([obj])
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(1, len(primitive))
+ for item in primitive:
+ self.assertNotIsInstance(item, base.NovaObject)
+ thing2 = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual(1, len(thing2))
+ for item in thing2:
+ self.assertIsInstance(item, MyObj)
+ # dict case
+ thing = {'key': obj}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(1, len(primitive))
+ for item in primitive.itervalues():
+ self.assertNotIsInstance(item, base.NovaObject)
+ thing2 = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual(1, len(thing2))
+ for item in thing2.itervalues():
+ self.assertIsInstance(item, MyObj)
+
+ # object-action updates dict case
+ thing = {'foo': obj.obj_to_primitive()}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(thing, primitive)
+ thing2 = ser.deserialize_entity(self.context, thing)
+ self.assertIsInstance(thing2['foo'], base.NovaObject)
+
+
+# NOTE(danms): The hashes in this list should only be changed if
+# they come with a corresponding version bump in the affected
+# objects
+object_data = {
+ 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d',
+ 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25',
+ 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5',
+ 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a',
+ 'BandwidthUsage': '1.1-bdab751673947f0ac7de108540a1a8ce',
+ 'BandwidthUsageList': '1.1-76898106a9db393cd5f42c557389c507',
+ 'BlockDeviceMapping': '1.4-9968ffe513e7672484b0f528b034cd0f',
+ 'BlockDeviceMappingList': '1.5-83767968de6e91e9705bddaae02bc649',
+ 'ComputeNode': '1.6-d2ea9b8f4a6e95ff6a683266eebddbff',
+ 'ComputeNodeList': '1.6-205aa2ea08d49f6ce87df1fcd2407b4e',
+ 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
+ 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4',
+ 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99',
+ 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836',
+ 'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143',
+ 'FixedIP': '1.6-2472964d39e50da67202109eb85cd173',
+ 'FixedIPList': '1.6-f2f740de66bc2d90627004bd311690ad',
+ 'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4',
+ 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721',
+ 'FloatingIP': '1.6-27eb68b7c9c620dd5f0561b5a3be0e82',
+ 'FloatingIPList': '1.7-f376f63ed99243f9d90841b7f6732bbf',
+ 'HVSpec': '1.0-c4d8377cc4fe519930e60c1d8265a142',
+ 'Instance': '1.16-b00c09fb92ae80b393943f56e84abd9c',
+ 'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663',
+ 'InstanceActionEvent': '1.1-42dbdba74bd06e0619ca75cd3397cd1b',
+ 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e',
+ 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266',
+ 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7',
+ 'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e',
+ 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d',
+ 'InstanceGroup': '1.9-95ece99f092e8f4f88327cdbb44162c9',
+ 'InstanceGroupList': '1.6-c6b78f3c9d9080d33c08667e80589817',
+ 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f',
+ 'InstanceList': '1.10-03dd7839cd11cff75c3661c9e4227900',
+ 'InstanceNUMACell': '1.1-8d2a13c8360cc9ea1b68c9c6c4476857',
+ 'InstanceNUMATopology': '1.1-86b95d263c4c68411d44c6741b8d2bb0',
+ 'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
+ 'InstancePCIRequests': '1.1-bc7c6684d8579ee49d6a3b8aef756918',
+ 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a',
+ 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8',
+ 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed',
+ 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353',
+ 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93',
+ 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
+ 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e',
+ 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e',
+ 'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
+ 'NetworkRequestList': '1.1-beeab521ac9450f1f5ef4eaa945a783c',
+ 'PciDevice': '1.2-29e35c3199f3b98ce66e5d1212612818',
+ 'PciDeviceList': '1.1-2896df4f5b06579e5f35adba5fcae9db',
+ 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418',
+ 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f',
+ 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2',
+ 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b',
+ 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f',
+ 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576',
+ 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c',
+ 'Service': '1.5-82bbfd46a744a9c89bc44b47a1b81683',
+ 'ServiceList': '1.3-4a1a5822dea268d0d7f892f5106bb2e1',
+ 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd',
+ 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2',
+ 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6',
+}
+
+
+object_relationships = {
+ 'BlockDeviceMapping': {'Instance': '1.16'},
+ 'FixedIP': {'Instance': '1.16', 'Network': '1.2',
+ 'VirtualInterface': '1.0',
+ 'FloatingIPList': '1.7'},
+ 'FloatingIP': {'FixedIP': '1.6'},
+ 'Instance': {'InstanceFault': '1.2',
+ 'InstanceInfoCache': '1.5',
+ 'InstanceNUMATopology': '1.1',
+ 'PciDeviceList': '1.1',
+ 'SecurityGroupList': '1.0',
+ 'InstancePCIRequests': '1.1'},
+ 'MyObj': {'MyOwnedObject': '1.0'},
+ 'SecurityGroupRule': {'SecurityGroup': '1.1'},
+ 'Service': {'ComputeNode': '1.6'},
+ 'TestSubclassedObject': {'MyOwnedObject': '1.0'}
+}
+
+
+class TestObjectVersions(test.TestCase):
+ def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
+ """Follow a chain of remotable things down to the original function."""
+ if isinstance(thing, classmethod):
+ return self._find_remotable_method(cls, thing.__get__(None, cls))
+ elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
+ return self._find_remotable_method(cls, thing.original_fn,
+ parent_was_remotable=True)
+ elif parent_was_remotable:
+ # We must be the first non-remotable thing underneath a stack of
+ # remotable things (i.e. the actual implementation method)
+ return thing
+ else:
+ # This means the top-level thing never hit a remotable layer
+ return None
+
+ def _get_fingerprint(self, obj_name):
+ obj_class = base.NovaObject._obj_classes[obj_name][0]
+ fields = obj_class.fields.items()
+ fields.sort()
+ methods = []
+ for name in dir(obj_class):
+ thing = getattr(obj_class, name)
+ if inspect.ismethod(thing) or isinstance(thing, classmethod):
+ method = self._find_remotable_method(obj_class, thing)
+ if method:
+ methods.append((name, inspect.getargspec(method)))
+ methods.sort()
+ # NOTE(danms): Things that need a version bump are any fields
+ # and their types, or the signatures of any remotable methods.
+ # Of course, these are just the mechanical changes we can detect,
+ # but many other things may require a version bump (method behavior
+ # and return value changes, for example).
+ if hasattr(obj_class, 'child_versions'):
+ relevant_data = (fields, methods, obj_class.child_versions)
+ else:
+ relevant_data = (fields, methods)
+ fingerprint = '%s-%s' % (obj_class.VERSION,
+ hashlib.md5(str(relevant_data)).hexdigest())
+ return fingerprint
+
+ def test_versions(self):
+ fingerprints = {}
+ for obj_name in base.NovaObject._obj_classes:
+ fingerprints[obj_name] = self._get_fingerprint(obj_name)
+
+ if os.getenv('GENERATE_HASHES'):
+ file('object_hashes.txt', 'w').write(
+ pprint.pformat(fingerprints))
+ raise test.TestingException(
+ 'Generated hashes in object_hashes.txt')
+
+ stored = set(object_data.items())
+ computed = set(fingerprints.items())
+ changed = stored.symmetric_difference(computed)
+ expected = {}
+ actual = {}
+ for name, hash in changed:
+ expected[name] = object_data.get(name)
+ actual[name] = fingerprints.get(name)
+
+ self.assertEqual(expected, actual,
+ 'Some objects have changed; please make sure the '
+ 'versions have been bumped, and then update their '
+ 'hashes here.')
+
+ def _build_tree(self, tree, obj_class):
+ obj_name = obj_class.obj_name()
+ if obj_name in tree:
+ return
+
+ for name, field in obj_class.fields.items():
+ if isinstance(field._type, fields.Object):
+ sub_obj_name = field._type._obj_name
+ sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
+ self._build_tree(tree, sub_obj_class)
+ tree.setdefault(obj_name, {})
+ tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
+
+ def test_relationships(self):
+ tree = {}
+ for obj_name in base.NovaObject._obj_classes.keys():
+ self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
+
+ stored = set([(x, str(y)) for x, y in object_relationships.items()])
+ computed = set([(x, str(y)) for x, y in tree.items()])
+ changed = stored.symmetric_difference(computed)
+ expected = {}
+ actual = {}
+ for name, deps in changed:
+ expected[name] = object_relationships.get(name)
+ actual[name] = tree.get(name)
+ self.assertEqual(expected, actual,
+ 'Some objects have changed dependencies. '
+ 'Please make sure to bump the versions of '
+ 'parent objects and provide a rule in their '
+ 'obj_make_compatible() routines to backlevel '
+ 'the child object.')
+
+ def test_obj_make_compatible(self):
+ # Iterate all object classes and verify that we can run
+ # obj_make_compatible with every older version than current.
+ # This doesn't actually test the data conversions, but it at least
+ # makes sure the method doesn't blow up on something basic like
+ # expecting the wrong version format.
+ for obj_name in base.NovaObject._obj_classes:
+ obj_class = base.NovaObject._obj_classes[obj_name][0]
+ version = utils.convert_version_to_tuple(obj_class.VERSION)
+ for n in range(version[1]):
+ test_version = '%d.%d' % (version[0], n)
+ LOG.info('testing obj: %s version: %s' %
+ (obj_name, test_version))
+ obj_class().obj_to_primitive(target_version=test_version)
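
The NOTE(danms) comments in TestObjectVersions above describe the hash-locking idea: each object's version is paired with an md5 digest of its field definitions and remotable-method signatures, so any mechanical change forces a deliberate version bump plus an update of the stored hash. Below is a minimal, self-contained sketch of that idea using only the standard library; SampleObject is invented for illustration, and unlike the real _get_fingerprint() it does not fold in child_versions and uses Python 3's inspect.signature rather than the Python 2 getargspec seen in the diff.

    import hashlib
    import inspect


    class SampleObject(object):
        """Stand-in for a versioned object; fields and methods are made up."""
        VERSION = '1.0'
        fields = {'foo': 'Integer', 'bar': 'String'}

        def query(self, context, limit=None):
            return []


    def get_fingerprint(obj_class):
        # Hash the sorted field definitions plus the signatures of the
        # methods defined on the class; changing either changes the digest.
        fields = sorted(obj_class.fields.items())
        methods = []
        for name, thing in sorted(vars(obj_class).items()):
            if inspect.isfunction(thing):
                methods.append((name, str(inspect.signature(thing))))
        relevant_data = (fields, methods)
        digest = hashlib.md5(str(relevant_data).encode('utf-8')).hexdigest()
        return '%s-%s' % (obj_class.VERSION, digest)


    # A test compares this against a value stored alongside the test (like
    # object_data above) and fails on any mismatch, forcing VERSION and the
    # stored hash to be updated together.
    print(get_fingerprint(SampleObject))
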
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
new file mode 100644
index 0000000000..804709a262
--- /dev/null
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from nova import context
+from nova import db
+from nova.objects import instance
+from nova.objects import pci_device
+from nova.tests.unit.objects import test_objects
+
+dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'status': 'available'}
+
+
+fake_db_dev = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'available',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ 'request_id': None,
+ }
+
+
+fake_db_dev_1 = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 2,
+ 'compute_node_id': 1,
+ 'address': 'a1',
+ 'vendor_id': 'v1',
+ 'product_id': 'p1',
+ 'dev_type': 't',
+ 'status': 'available',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ 'request_id': None,
+ }
+
+
+class _TestPciDeviceObject(object):
+ def _create_fake_instance(self):
+ self.inst = instance.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = pci_device.PciDeviceList()
+
+ def _create_fake_pci_device(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
+ db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
+ self.mox.ReplayAll()
+ self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
+
+ def test_create_pci_device(self):
+ self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.assertEqual(self.pci_device.product_id, 'p')
+ self.assertEqual(self.pci_device.obj_what_changed(),
+ set(['compute_node_id', 'product_id', 'vendor_id',
+ 'status', 'address', 'extra_info']))
+
+ def test_pci_device_extra_info(self):
+ self.dev_dict = copy.copy(dev_dict)
+ self.dev_dict['k1'] = 'v1'
+ self.dev_dict['k2'] = 'v2'
+ self.pci_device = pci_device.PciDevice.create(self.dev_dict)
+ extra_value = self.pci_device.extra_info
+ self.assertEqual(extra_value.get('k1'), 'v1')
+ self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
+ self.assertEqual(self.pci_device.obj_what_changed(),
+ set(['compute_node_id', 'address', 'product_id',
+ 'vendor_id', 'status', 'extra_info']))
+
+ def test_update_device(self):
+ self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device.obj_reset_changes()
+ changes = {'product_id': 'p2', 'vendor_id': 'v2'}
+ self.pci_device.update_device(changes)
+ self.assertEqual(self.pci_device.vendor_id, 'v2')
+ self.assertEqual(self.pci_device.obj_what_changed(),
+ set(['vendor_id', 'product_id']))
+
+ def test_update_device_same_value(self):
+ self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device.obj_reset_changes()
+ changes = {'product_id': 'p', 'vendor_id': 'v2'}
+ self.pci_device.update_device(changes)
+ self.assertEqual(self.pci_device.product_id, 'p')
+ self.assertEqual(self.pci_device.vendor_id, 'v2')
+ self.assertEqual(self.pci_device.obj_what_changed(),
+ set(['vendor_id', 'product_id']))
+
+ def test_get_by_dev_addr(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
+ db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
+ self.mox.ReplayAll()
+ self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
+ self.assertEqual(self.pci_device.product_id, 'p')
+ self.assertEqual(self.pci_device.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_get_by_dev_id(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
+ db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
+ self.mox.ReplayAll()
+ self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
+ self.assertEqual(self.pci_device.product_id, 'p')
+ self.assertEqual(self.pci_device.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_save(self):
+ ctxt = context.get_admin_context()
+ self._create_fake_pci_device()
+ return_dev = dict(fake_db_dev, status='available',
+ instance_uuid='fake-uuid-3')
+ self.pci_device.status = 'allocated'
+ self.pci_device.instance_uuid = 'fake-uuid-2'
+ expected_updates = dict(status='allocated',
+ instance_uuid='fake-uuid-2')
+ self.mox.StubOutWithMock(db, 'pci_device_update')
+ db.pci_device_update(ctxt, 1, 'a',
+ expected_updates).AndReturn(return_dev)
+ self.mox.ReplayAll()
+ self.pci_device.save(ctxt)
+ self.assertEqual(self.pci_device.status, 'available')
+ self.assertEqual(self.pci_device.instance_uuid,
+ 'fake-uuid-3')
+ self.assertRemotes()
+
+ def test_save_no_extra_info(self):
+ return_dev = dict(fake_db_dev, status='available',
+ instance_uuid='fake-uuid-3')
+
+ def _fake_update(ctxt, node_id, addr, updates):
+ self.extra_info = updates.get('extra_info')
+ return return_dev
+
+ ctxt = context.get_admin_context()
+ self.stubs.Set(db, 'pci_device_update', _fake_update)
+ self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device.save(ctxt)
+ self.assertEqual(self.extra_info, '{}')
+
+ def test_save_removed(self):
+ ctxt = context.get_admin_context()
+ self._create_fake_pci_device()
+ self.pci_device.status = 'removed'
+ self.mox.StubOutWithMock(db, 'pci_device_destroy')
+ db.pci_device_destroy(ctxt, 1, 'a')
+ self.mox.ReplayAll()
+ self.pci_device.save(ctxt)
+ self.assertEqual(self.pci_device.status, 'deleted')
+ self.assertRemotes()
+
+ def test_save_deleted(self):
+ def _fake_destroy(ctxt, node_id, addr):
+ self.called = True
+
+ def _fake_update(ctxt, node_id, addr, updates):
+ self.called = True
+ ctxt = context.get_admin_context()
+ self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
+ self.stubs.Set(db, 'pci_device_update', _fake_update)
+ self._create_fake_pci_device()
+ self.pci_device.status = 'deleted'
+ self.called = False
+ self.pci_device.save(ctxt)
+ self.assertEqual(self.called, False)
+
+
+class TestPciDeviceObject(test_objects._LocalTest,
+ _TestPciDeviceObject):
+ pass
+
+
+class TestPciDeviceObjectRemote(test_objects._RemoteTest,
+ _TestPciDeviceObject):
+ pass
+
+
+fake_pci_devs = [fake_db_dev, fake_db_dev_1]
+
+
+class _TestPciDeviceListObject(object):
+ def test_get_by_compute_node(self):
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
+ db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
+ self.mox.ReplayAll()
+ devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
+ for i in range(len(fake_pci_devs)):
+ self.assertIsInstance(devs[i], pci_device.PciDevice)
+ self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
+ self.assertRemotes()
+
+ def test_get_by_instance_uuid(self):
+ ctxt = context.get_admin_context()
+ fake_db_1 = dict(fake_db_dev, address='a1',
+ status='allocated', instance_uuid='1')
+ fake_db_2 = dict(fake_db_dev, address='a2',
+ status='allocated', instance_uuid='1')
+ self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
+ db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
+ [fake_db_1, fake_db_2])
+ self.mox.ReplayAll()
+ devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
+ self.assertEqual(len(devs), 2)
+ for i in range(len(fake_pci_devs)):
+ self.assertIsInstance(devs[i], pci_device.PciDevice)
+ self.assertEqual(devs[0].vendor_id, 'v')
+ self.assertEqual(devs[1].vendor_id, 'v')
+ self.assertRemotes()
+
+
+class TestPciDeviceListObject(test_objects._LocalTest,
+ _TestPciDeviceListObject):
+ pass
+
+
+class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
+ _TestPciDeviceListObject):
+ pass
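
These PCI tests, like most of the object tests in this series, follow mox's record/replay style: stub a call with StubOutWithMock, record the expected invocation and its canned return with AndReturn, switch to replay with ReplayAll, then exercise the code (verification is handled by the test base class). Here is a minimal standalone sketch of that lifecycle under the assumption that the mox library is installed (mox3 exposes the same API on Python 3); FakeBackend and get_item() are invented stand-ins, not nova.db.

    import mox


    class FakeBackend(object):
        # Stand-in for a db-style API; get_item() is a made-up call.
        def get_item(self, ctxt, item_id):
            raise AssertionError('the real backend must not be hit in tests')


    backend = FakeBackend()
    m = mox.Mox()

    # Record phase: stub the call, then declare the exact invocation that is
    # expected and the canned value it should return.
    m.StubOutWithMock(backend, 'get_item')
    backend.get_item('ctxt', 1).AndReturn({'id': 1, 'vendor_id': 'v'})

    # Replay phase: from here on, real calls are matched against the recording.
    m.ReplayAll()
    assert backend.get_item('ctxt', 1)['vendor_id'] == 'v'

    # Verification fails if any recorded expectation was never consumed.
    m.VerifyAll()
    m.UnsetStubs()
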
diff --git a/nova/tests/unit/objects/test_quotas.py b/nova/tests/unit/objects/test_quotas.py
new file mode 100644
index 0000000000..02781a7cd5
--- /dev/null
+++ b/nova/tests/unit/objects/test_quotas.py
@@ -0,0 +1,167 @@
+# Copyright 2013 Rackspace Hosting.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import context
+from nova.objects import quotas as quotas_obj
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+QUOTAS = quota.QUOTAS
+
+
+class TestQuotasModule(test.NoDBTestCase):
+ def setUp(self):
+ super(TestQuotasModule, self).setUp()
+ self.context = context.RequestContext('fake_user1', 'fake_proj1')
+ self.instance = fake_instance.fake_db_instance(
+ project_id='fake_proj2', user_id='fake_user2')
+
+ def test_ids_from_instance_non_admin(self):
+ project_id, user_id = quotas_obj.ids_from_instance(
+ self.context, self.instance)
+ self.assertEqual('fake_user2', user_id)
+ self.assertEqual('fake_proj1', project_id)
+
+ def test_ids_from_instance_admin(self):
+ project_id, user_id = quotas_obj.ids_from_instance(
+ self.context.elevated(), self.instance)
+ self.assertEqual('fake_user2', user_id)
+ self.assertEqual('fake_proj2', project_id)
+
+
+class _TestQuotasObject(object):
+ def setUp(self):
+ super(_TestQuotasObject, self).setUp()
+ self.context = context.RequestContext('fake_user1', 'fake_proj1')
+ self.instance = fake_instance.fake_db_instance(
+ project_id='fake_proj2', user_id='fake_user2')
+
+ def test_from_reservations(self):
+ fake_reservations = ['1', '2']
+ quotas = quotas_obj.Quotas.from_reservations(
+ self.context, fake_reservations)
+ self.assertEqual(self.context, quotas._context)
+ self.assertEqual(fake_reservations, quotas.reservations)
+ self.assertIsNone(quotas.project_id)
+ self.assertIsNone(quotas.user_id)
+
+ def test_from_reservations_bogus(self):
+ fake_reservations = [_TestQuotasObject, _TestQuotasObject]
+ self.assertRaises(ValueError,
+ quotas_obj.Quotas.from_reservations,
+ self.context, fake_reservations)
+
+ def test_from_reservations_instance(self):
+ fake_reservations = ['1', '2']
+ quotas = quotas_obj.Quotas.from_reservations(
+ self.context, fake_reservations,
+ instance=self.instance)
+ self.assertEqual(self.context, quotas._context)
+ self.assertEqual(fake_reservations, quotas.reservations)
+ self.assertEqual('fake_proj1', quotas.project_id)
+ self.assertEqual('fake_user2', quotas.user_id)
+
+ def test_from_reservations_instance_admin(self):
+ fake_reservations = ['1', '2']
+ elevated = self.context.elevated()
+ quotas = quotas_obj.Quotas.from_reservations(
+ elevated, fake_reservations,
+ instance=self.instance)
+ self.assertEqual(elevated, quotas._context)
+ self.assertEqual(fake_reservations, quotas.reservations)
+ self.assertEqual('fake_proj2', quotas.project_id)
+ self.assertEqual('fake_user2', quotas.user_id)
+
+ def test_reserve(self):
+ fake_reservations = ['1', '2']
+ quotas = quotas_obj.Quotas()
+
+ self.mox.StubOutWithMock(QUOTAS, 'reserve')
+ QUOTAS.reserve(self.context, expire='expire',
+ project_id='project_id', user_id='user_id',
+ moo='cow').AndReturn(fake_reservations)
+
+ self.mox.ReplayAll()
+ quotas.reserve(self.context, expire='expire',
+ project_id='project_id', user_id='user_id',
+ moo='cow')
+ self.assertEqual(self.context, quotas._context)
+ self.assertEqual(fake_reservations, quotas.reservations)
+ self.assertEqual('project_id', quotas.project_id)
+ self.assertEqual('user_id', quotas.user_id)
+
+ def test_commit(self):
+ fake_reservations = ['1', '2']
+ quotas = quotas_obj.Quotas.from_reservations(
+ self.context, fake_reservations)
+
+ self.mox.StubOutWithMock(QUOTAS, 'commit')
+ QUOTAS.commit(self.context, fake_reservations,
+ project_id=None, user_id=None)
+
+ self.mox.ReplayAll()
+ quotas.commit()
+ self.assertIsNone(quotas.reservations)
+
+ def test_commit_none_reservations(self):
+ quotas = quotas_obj.Quotas.from_reservations(self.context, None)
+ self.mox.StubOutWithMock(QUOTAS, 'commit')
+ self.mox.ReplayAll()
+ quotas.commit()
+
+ def test_rollback(self):
+ fake_reservations = ['1', '2']
+ quotas = quotas_obj.Quotas.from_reservations(
+ self.context, fake_reservations)
+
+ self.mox.StubOutWithMock(QUOTAS, 'rollback')
+ QUOTAS.rollback(self.context, fake_reservations,
+ project_id=None, user_id=None)
+
+ self.mox.ReplayAll()
+ quotas.rollback()
+ self.assertIsNone(quotas.reservations)
+
+ def test_rollback_none_reservations(self):
+ quotas = quotas_obj.Quotas.from_reservations(self.context, None)
+ self.mox.StubOutWithMock(QUOTAS, 'rollback')
+ self.mox.ReplayAll()
+ quotas.rollback()
+
+ @mock.patch('nova.db.quota_create')
+ def test_create_limit(self, mock_create):
+ quotas_obj.Quotas.create_limit(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+ mock_create.assert_called_once_with(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+
+ @mock.patch('nova.db.quota_update')
+ def test_update_limit(self, mock_update):
+ quotas_obj.Quotas.update_limit(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+ mock_update.assert_called_once_with(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+
+
+class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest):
+ pass
+
+
+class TestRemoteQuotasObject(_TestQuotasObject, test_objects._RemoteTest):
+ pass
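
The commit/rollback tests above encode the usual reservation lifecycle: reserve quota up front, then commit on success or roll back on failure, after which the reservations are dropped. The toy sketch below only mirrors the shape of that flow; MiniQuotas and consume_resources() are invented here and are not Nova APIs.

    class MiniQuotas(object):
        """Toy stand-in mirroring the reserve/commit/rollback flow."""
        def __init__(self):
            self.reservations = None

        def reserve(self, **deltas):
            # The real object calls QUOTAS.reserve() and keeps the ids.
            self.reservations = ['res-%s' % name for name in sorted(deltas)]

        def commit(self):
            if self.reservations is None:
                return      # nothing reserved; mirrors the "none" tests above
            self.reservations = None

        def rollback(self):
            if self.reservations is None:
                return
            self.reservations = None


    def consume_resources():
        """Hypothetical work that may fail after quota has been reserved."""
        pass


    quotas = MiniQuotas()
    quotas.reserve(instances=1, cores=2)
    try:
        consume_resources()
    except Exception:
        quotas.rollback()   # release the held quota on failure
        raise
    else:
        quotas.commit()     # make the usage permanent on success
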
diff --git a/nova/tests/unit/objects/test_security_group.py b/nova/tests/unit/objects/test_security_group.py
new file mode 100644
index 0000000000..91966d0676
--- /dev/null
+++ b/nova/tests/unit/objects/test_security_group.py
@@ -0,0 +1,175 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.objects import instance
+from nova.objects import security_group
+from nova.tests.unit.objects import test_objects
+
+
+fake_secgroup = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'name': 'fake-name',
+ 'description': 'fake-desc',
+ 'user_id': 'fake-user',
+ 'project_id': 'fake-project',
+ }
+
+
+class _TestSecurityGroupObject(object):
+ def _fix_deleted(self, db_secgroup):
+ # NOTE(danms): Account for the difference in 'deleted'
+ return dict(db_secgroup.items(), deleted=False)
+
+ def test_get(self):
+ self.mox.StubOutWithMock(db, 'security_group_get')
+ db.security_group_get(self.context, 1).AndReturn(fake_secgroup)
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup.get(self.context, 1)
+ self.assertEqual(self._fix_deleted(fake_secgroup),
+ dict(secgroup.items()))
+ self.assertEqual(secgroup.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_get_by_name(self):
+ self.mox.StubOutWithMock(db, 'security_group_get_by_name')
+ db.security_group_get_by_name(self.context, 'fake-project',
+ 'fake-name').AndReturn(fake_secgroup)
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup.get_by_name(self.context,
+ 'fake-project',
+ 'fake-name')
+ self.assertEqual(self._fix_deleted(fake_secgroup),
+ dict(secgroup.items()))
+ self.assertEqual(secgroup.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_in_use(self):
+ self.mox.StubOutWithMock(db, 'security_group_in_use')
+ db.security_group_in_use(self.context, 123).AndReturn(True)
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup()
+ secgroup.id = 123
+ self.assertTrue(secgroup.in_use(self.context))
+ self.assertRemotes()
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'security_group_update')
+ updated_secgroup = dict(fake_secgroup, project_id='changed')
+ db.security_group_update(self.context, 1,
+ {'description': 'foobar'}).AndReturn(
+ updated_secgroup)
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup._from_db_object(
+ self.context, security_group.SecurityGroup(), fake_secgroup)
+ secgroup.description = 'foobar'
+ secgroup.save(self.context)
+ self.assertEqual(self._fix_deleted(updated_secgroup),
+ dict(secgroup.items()))
+ self.assertEqual(secgroup.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_save_no_changes(self):
+ self.mox.StubOutWithMock(db, 'security_group_update')
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup._from_db_object(
+ self.context, security_group.SecurityGroup(), fake_secgroup)
+ secgroup.save(self.context)
+
+ def test_refresh(self):
+ updated_secgroup = dict(fake_secgroup, description='changed')
+ self.mox.StubOutWithMock(db, 'security_group_get')
+ db.security_group_get(self.context, 1).AndReturn(updated_secgroup)
+ self.mox.ReplayAll()
+ secgroup = security_group.SecurityGroup._from_db_object(
+ self.context, security_group.SecurityGroup(), fake_secgroup)
+ secgroup.refresh(self.context)
+ self.assertEqual(self._fix_deleted(updated_secgroup),
+ dict(secgroup.items()))
+ self.assertEqual(secgroup.obj_what_changed(), set())
+ self.assertRemotes()
+
+
+class TestSecurityGroupObject(test_objects._LocalTest,
+ _TestSecurityGroupObject):
+ pass
+
+
+class TestSecurityGroupObjectRemote(test_objects._RemoteTest,
+ _TestSecurityGroupObject):
+ pass
+
+
+fake_secgroups = [
+ dict(fake_secgroup, id=1, name='secgroup1'),
+ dict(fake_secgroup, id=2, name='secgroup2'),
+ ]
+
+
+class _TestSecurityGroupListObject(object):
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'security_group_get_all')
+ db.security_group_get_all(self.context).AndReturn(fake_secgroups)
+ self.mox.ReplayAll()
+ secgroup_list = security_group.SecurityGroupList.get_all(self.context)
+ for i in range(len(fake_secgroups)):
+ self.assertIsInstance(secgroup_list[i],
+ security_group.SecurityGroup)
+ self.assertEqual(fake_secgroups[i]['id'],
+ secgroup_list[i]['id'])
+ self.assertEqual(secgroup_list[i]._context, self.context)
+
+ def test_get_by_project(self):
+ self.mox.StubOutWithMock(db, 'security_group_get_by_project')
+ db.security_group_get_by_project(self.context,
+ 'fake-project').AndReturn(
+ fake_secgroups)
+ self.mox.ReplayAll()
+ secgroup_list = security_group.SecurityGroupList.get_by_project(
+ self.context, 'fake-project')
+ for i in range(len(fake_secgroups)):
+ self.assertIsInstance(secgroup_list[i],
+ security_group.SecurityGroup)
+ self.assertEqual(fake_secgroups[i]['id'],
+ secgroup_list[i]['id'])
+
+ def test_get_by_instance(self):
+ inst = instance.Instance()
+ inst.uuid = 'fake-inst-uuid'
+ self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
+ db.security_group_get_by_instance(self.context,
+ 'fake-inst-uuid').AndReturn(
+ fake_secgroups)
+ self.mox.ReplayAll()
+ secgroup_list = security_group.SecurityGroupList.get_by_instance(
+ self.context, inst)
+ for i in range(len(fake_secgroups)):
+ self.assertIsInstance(secgroup_list[i],
+ security_group.SecurityGroup)
+ self.assertEqual(fake_secgroups[i]['id'],
+ secgroup_list[i]['id'])
+
+
+class TestSecurityGroupListObject(test_objects._LocalTest,
+ _TestSecurityGroupListObject):
+ pass
+
+
+class TestSecurityGroupListObjectRemote(test_objects._RemoteTest,
+ _TestSecurityGroupListObject):
+ pass
diff --git a/nova/tests/unit/objects/test_security_group_rule.py b/nova/tests/unit/objects/test_security_group_rule.py
new file mode 100644
index 0000000000..481be189a5
--- /dev/null
+++ b/nova/tests/unit/objects/test_security_group_rule.py
@@ -0,0 +1,95 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova import objects
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_security_group
+
+fake_rule = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 1,
+ 'protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ }
+
+
+class _TestSecurityGroupRuleObject(object):
+ def test_get_by_id(self):
+ with mock.patch.object(db, 'security_group_rule_get') as sgrg:
+ sgrg.return_value = fake_rule
+ rule = objects.SecurityGroupRule.get_by_id(
+ self.context, 1)
+ for field in fake_rule:
+ if field == 'cidr':
+ self.assertEqual(fake_rule[field], str(rule[field]))
+ else:
+ self.assertEqual(fake_rule[field], rule[field])
+ sgrg.assert_called_with(self.context, 1)
+
+ def test_get_by_security_group(self):
+ secgroup = objects.SecurityGroup()
+ secgroup.id = 123
+ rule = dict(fake_rule)
+ rule['grantee_group'] = dict(test_security_group.fake_secgroup, id=123)
+ stupid_method = 'security_group_rule_get_by_security_group'
+ with mock.patch.object(db, stupid_method) as sgrgbsg:
+ sgrgbsg.return_value = [rule]
+ rules = (objects.SecurityGroupRuleList.
+ get_by_security_group(self.context, secgroup))
+ self.assertEqual(1, len(rules))
+ self.assertEqual(123, rules[0].grantee_group.id)
+
+ @mock.patch.object(db, 'security_group_rule_create',
+ return_value=fake_rule)
+ def test_create(self, db_mock):
+ rule = objects.SecurityGroupRule()
+ rule.protocol = 'tcp'
+ secgroup = objects.SecurityGroup()
+ secgroup.id = 123
+ parentgroup = objects.SecurityGroup()
+ parentgroup.id = 223
+ rule.grantee_group = secgroup
+ rule.parent_group = parentgroup
+ rule.create(self.context)
+ updates = db_mock.call_args[0][1]
+ self.assertEqual(fake_rule['id'], rule.id)
+ self.assertEqual(updates['group_id'], rule.grantee_group.id)
+ self.assertEqual(updates['parent_group_id'], rule.parent_group.id)
+
+ @mock.patch.object(db, 'security_group_rule_create',
+ return_value=fake_rule)
+ def test_set_id_failure(self, db_mock):
+ rule = objects.SecurityGroupRule()
+ rule.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ rule, 'id', 124)
+
+
+class TestSecurityGroupRuleObject(test_objects._LocalTest,
+ _TestSecurityGroupRuleObject):
+ pass
+
+
+class TestSecurityGroupRuleObjectRemote(test_objects._RemoteTest,
+ _TestSecurityGroupRuleObject):
+ pass
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
new file mode 100644
index 0000000000..d8a72056a5
--- /dev/null
+++ b/nova/tests/unit/objects/test_service.py
@@ -0,0 +1,226 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import aggregate
+from nova.objects import service
+from nova.tests.unit.objects import test_compute_node
+from nova.tests.unit.objects import test_objects
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_service = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'host': 'fake-host',
+ 'binary': 'fake-service',
+ 'topic': 'fake-service-topic',
+ 'report_count': 1,
+ 'disabled': False,
+ 'disabled_reason': None,
+ }
+
+OPTIONAL = ['availability_zone', 'compute_node']
+
+
+class _TestServiceObject(object):
+ def supported_hv_specs_comparator(self, expected, obj_val):
+ obj_val = [inst.to_list() for inst in obj_val]
+ self.json_comparator(expected, obj_val)
+
+ def comparators(self):
+ return {'stats': self.json_comparator,
+ 'host_ip': self.str_comparator,
+ 'supported_hv_specs': self.supported_hv_specs_comparator}
+
+ def subs(self):
+ return {'supported_hv_specs': 'supported_instances'}
+
+ def _test_query(self, db_method, obj_method, *args, **kwargs):
+ self.mox.StubOutWithMock(db, db_method)
+ getattr(db, db_method)(self.context, *args, **kwargs).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ obj = getattr(service.Service, obj_method)(self.context, *args,
+ **kwargs)
+ self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
+
+ def test_get_by_id(self):
+ self._test_query('service_get', 'get_by_id', 123)
+
+ def test_get_by_host_and_topic(self):
+ self._test_query('service_get_by_host_and_topic',
+ 'get_by_host_and_topic', 'fake-host', 'fake-topic')
+
+ def test_get_by_compute_host(self):
+ self._test_query('service_get_by_compute_host', 'get_by_compute_host',
+ 'fake-host')
+
+ def test_get_by_args(self):
+ self._test_query('service_get_by_args', 'get_by_args', 'fake-host',
+ 'fake-service')
+
+ def test_with_compute_node(self):
+ self.mox.StubOutWithMock(db, 'service_get')
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ _fake_service = dict(
+ fake_service, compute_node=[test_compute_node.fake_compute_node])
+ db.service_get(self.context, 123).AndReturn(_fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service.get_by_id(self.context, 123)
+ self.assertTrue(service_obj.obj_attr_is_set('compute_node'))
+ self.compare_obj(service_obj.compute_node,
+ test_compute_node.fake_compute_node,
+ subs=self.subs(),
+ allow_missing=OPTIONAL,
+ comparators=self.comparators())
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'service_create')
+ db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.host = 'fake-host'
+ service_obj.create(self.context)
+ self.assertEqual(fake_service['id'], service_obj.id)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'service_create')
+ db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.host = 'fake-host'
+ service_obj.create(self.context)
+ self.assertRaises(exception.ObjectActionError, service_obj.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.id = 123
+ service_obj.host = 'fake-host'
+ service_obj.save(self.context)
+
+ @mock.patch.object(db, 'service_create',
+ return_value=fake_service)
+ def test_set_id_failure(self, db_mock):
+ service_obj = service.Service()
+ service_obj.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ service_obj, 'id', 124)
+
+ def _test_destroy(self):
+ self.mox.StubOutWithMock(db, 'service_destroy')
+ db.service_destroy(self.context, 123)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.id = 123
+ service_obj.destroy(self.context)
+
+ def test_destroy(self):
+ # The test harness needs db.service_destroy to work,
+ # so avoid leaving it broken here after we're done
+ orig_service_destroy = db.service_destroy
+ try:
+ self._test_destroy()
+ finally:
+ db.service_destroy = orig_service_destroy
+
+ def test_get_by_topic(self):
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_by_host(self):
+ self.mox.StubOutWithMock(db, 'service_get_all_by_host')
+ db.service_get_all_by_host(self.context, 'fake-host').AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_by_host(self.context, 'fake-host')
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ db.service_get_all(self.context, disabled=False).AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_all(self.context, disabled=False)
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_all_with_az(self):
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ self.mox.StubOutWithMock(aggregate.AggregateList,
+ 'get_by_metadata_key')
+ db.service_get_all(self.context, disabled=None).AndReturn(
+ [dict(fake_service, topic='compute')])
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'availability_zone': 'test-az'}
+ agg.create(self.context)
+ agg.hosts = [fake_service['host']]
+ aggregate.AggregateList.get_by_metadata_key(self.context,
+ 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_all(self.context, set_zones=True)
+ self.assertEqual(1, len(services))
+ self.assertEqual('test-az', services[0].availability_zone)
+
+ def test_compute_node(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ db.compute_node_get_by_service_id(self.context, 123).AndReturn(
+ test_compute_node.fake_compute_node)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj._context = self.context
+ service_obj.id = 123
+ self.compare_obj(service_obj.compute_node,
+ test_compute_node.fake_compute_node,
+ subs=self.subs(),
+ allow_missing=OPTIONAL,
+ comparators=self.comparators())
+ # Make sure it doesn't re-fetch this
+ service_obj.compute_node
+
+ def test_load_when_orphaned(self):
+ service_obj = service.Service()
+ service_obj.id = 123
+ self.assertRaises(exception.OrphanedObjectError,
+ getattr, service_obj, 'compute_node')
+
+
+class TestServiceObject(test_objects._LocalTest,
+ _TestServiceObject):
+ pass
+
+
+class TestRemoteServiceObject(test_objects._RemoteTest,
+ _TestServiceObject):
+ pass
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
new file mode 100644
index 0000000000..6c416315c4
--- /dev/null
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import virtual_interface as vif_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_vif = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'address': '00:00:00:00:00:00',
+ 'network_id': 123,
+ 'instance_uuid': 'fake-uuid',
+ 'uuid': 'fake-uuid-2',
+}
+
+
+class _TestVirtualInterface(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_id(self):
+ with mock.patch.object(db, 'virtual_interface_get') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_address(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_address(self.context,
+ '00:00:00:00:00:00')
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_instance_and_network(self):
+ with mock.patch.object(db,
+ 'virtual_interface_get_by_instance_and_network') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_instance_and_network(
+ self.context, 'fake-uuid', 123)
+ self._compare(self, fake_vif, vif)
+
+ def test_create(self):
+ vif = vif_obj.VirtualInterface()
+ vif.address = '00:00:00:00:00:00'
+ vif.network_id = 123
+ vif.instance_uuid = 'fake-uuid'
+ vif.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'virtual_interface_create') as create:
+ create.return_value = fake_vif
+ vif.create(self.context)
+
+ self.assertEqual(self.context, vif._context)
+ vif._context = None
+ self._compare(self, fake_vif, vif)
+
+ def test_delete_by_instance_uuid(self):
+ with mock.patch.object(db,
+ 'virtual_interface_delete_by_instance') as delete:
+ vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
+ 'fake-uuid')
+ delete.assert_called_with(self.context, 'fake-uuid')
+
+
+class TestVirtualInterfaceObject(test_objects._LocalTest,
+ _TestVirtualInterface):
+ pass
+
+
+class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
+ _TestVirtualInterface):
+ pass
+
+
+class _TestVirtualInterfaceList(object):
+ def test_get_all(self):
+ with mock.patch.object(db, 'virtual_interface_get_all') as get:
+ get.return_value = [fake_vif]
+ vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
+ self.assertEqual(1, len(vifs))
+ _TestVirtualInterface._compare(self, fake_vif, vifs[0])
+
+ def test_get_by_instance_uuid(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
+ get.return_value = [fake_vif]
+ vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ self.assertEqual(1, len(vifs))
+ _TestVirtualInterface._compare(self, fake_vif, vifs[0])
+
+
+class TestVirtualInterfaceList(test_objects._LocalTest,
+ _TestVirtualInterfaceList):
+ pass
+
+
+class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
+ _TestVirtualInterfaceList):
+ pass
diff --git a/nova/tests/unit/pci/__init__.py b/nova/tests/unit/pci/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/pci/__init__.py
diff --git a/nova/tests/unit/pci/fakes.py b/nova/tests/unit/pci/fakes.py
new file mode 100644
index 0000000000..b56dfc20a8
--- /dev/null
+++ b/nova/tests/unit/pci/fakes.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+import mock
+
+from nova.pci import whitelist
+
+
+def fake_pci_whitelist():
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = None
+ patcher = mock.patch.object(whitelist, 'get_pci_device_devspec',
+ return_value=devspec)
+ patcher.start()
+ return patcher
+
+
+def patch_pci_whitelist(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ patcher = fake_pci_whitelist()
+ try:
+ f(self, *args, **kwargs)
+ finally:
+ # Always stop the patcher, even if the wrapped test raises, so
+ # the whitelist stub does not leak into later tests.
+ patcher.stop()
+ return wrapper
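
A short usage sketch for the helpers above: decorating a test method with patch_pci_whitelist keeps get_pci_device_devspec patched for the duration of that test. The test class and body below are illustrative only, not taken from the tree.

    import unittest

    from nova.tests.unit.pci import fakes as pci_fakes


    class WhitelistFakeUsage(unittest.TestCase):
        @pci_fakes.patch_pci_whitelist
        def test_uses_fake_whitelist(self):
            # While this test runs, whitelist.get_pci_device_devspec()
            # returns the Mock devspec built by fake_pci_whitelist(), whose
            # get_tags() yields None, so code under test sees a tag-less
            # whitelist entry.
            pass
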
diff --git a/nova/tests/unit/pci/test_device.py b/nova/tests/unit/pci/test_device.py
new file mode 100644
index 0000000000..2406ac254b
--- /dev/null
+++ b/nova/tests/unit/pci/test_device.py
@@ -0,0 +1,119 @@
+# Copyright 2014 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import exception
+from nova import objects
+from nova.pci import device
+from nova import test
+
+
+dev_dict = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'available',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ 'request_id': None,
+ }
+
+
+class PciDeviceTestCase(test.TestCase):
+ def setUp(self):
+ super(PciDeviceTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.inst = objects.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = objects.PciDeviceList()
+ self.devobj = objects.PciDevice._from_db_object(
+ self.ctxt,
+ objects.PciDevice(),
+ dev_dict)
+
+ def test_claim_device(self):
+ device.claim(self.devobj, self.inst)
+ self.assertEqual(self.devobj.status, 'claimed')
+ self.assertEqual(self.devobj.instance_uuid,
+ self.inst.uuid)
+ self.assertEqual(len(self.inst.pci_devices), 0)
+
+ def test_claim_device_fail(self):
+ self.devobj.status = 'allocated'
+ self.assertRaises(exception.PciDeviceInvalidStatus,
+ device.claim, self.devobj, self.inst)
+
+ def test_allocate_device(self):
+ device.claim(self.devobj, self.inst)
+ device.allocate(self.devobj, self.inst)
+ self.assertEqual(self.devobj.status, 'allocated')
+ self.assertEqual(self.devobj.instance_uuid, 'fake-inst-uuid')
+ self.assertEqual(len(self.inst.pci_devices), 1)
+ self.assertEqual(self.inst.pci_devices[0]['vendor_id'], 'v')
+ self.assertEqual(self.inst.pci_devices[0]['status'], 'allocated')
+
+ def test_allocate_device_fail_status(self):
+ self.devobj.status = 'removed'
+ self.assertRaises(exception.PciDeviceInvalidStatus,
+ device.allocate,
+ self.devobj,
+ self.inst)
+
+ def test_allocate_device_fail_owner(self):
+ inst_2 = objects.Instance()
+ inst_2.uuid = 'fake-inst-uuid-2'
+ device.claim(self.devobj, self.inst)
+ self.assertRaises(exception.PciDeviceInvalidOwner,
+ device.allocate,
+ self.devobj, inst_2)
+
+ def test_free_claimed_device(self):
+ device.claim(self.devobj, self.inst)
+ device.free(self.devobj, self.inst)
+ self.assertEqual(self.devobj.status, 'available')
+ self.assertIsNone(self.devobj.instance_uuid)
+
+ def test_free_allocated_device(self):
+ device.claim(self.devobj, self.inst)
+ device.allocate(self.devobj, self.inst)
+ self.assertEqual(len(self.inst.pci_devices), 1)
+ device.free(self.devobj, self.inst)
+ self.assertEqual(len(self.inst.pci_devices), 0)
+ self.assertEqual(self.devobj.status, 'available')
+ self.assertIsNone(self.devobj.instance_uuid)
+
+ def test_free_device_fail(self):
+ self.devobj.status = 'removed'
+ self.assertRaises(exception.PciDeviceInvalidStatus,
+ device.free, self.devobj)
+
+ def test_remove_device(self):
+ device.remove(self.devobj)
+ self.assertEqual(self.devobj.status, 'removed')
+ self.assertIsNone(self.devobj.instance_uuid)
+
+ def test_remove_device_fail(self):
+ device.claim(self.devobj, self.inst)
+ self.assertRaises(exception.PciDeviceInvalidStatus,
+ device.remove, self.devobj)
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
new file mode 100644
index 0000000000..d7b6098871
--- /dev/null
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -0,0 +1,177 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+import six
+
+from nova import exception
+from nova import objects
+from nova.pci import devspec
+from nova import test
+
+dev = {"vendor_id": "8086",
+ "product_id": "5057",
+ "address": "1234:5678:8988.5",
+ "phys_function": "0000:0a:00.0"}
+
+
+class PciAddressTestCase(test.NoDBTestCase):
+ def test_wrong_address(self):
+ pci_info = ('{"vendor_id": "8086", "address": "*: *: *.6",' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_address_too_big(self):
+ pci_info = ('{"address": "0000:0a:0b:00.5", ' +
+ '"physical_network": "hr_net"}')
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ devspec.PciDeviceSpec, pci_info)
+
+ def test_address_invalid_character(self):
+ pci_info = '{"address": "0000:h4.12:6", "physical_network": "hr_net"}'
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ devspec.PciDeviceSpec, pci_info)
+
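+ # devspec bounds each address field (MAX_FUNC, MAX_DOMAIN, MAX_BUS,
+ # MAX_SLOT); the next four tests pass a value one past each limit and
+ # expect the spec constructor to reject it.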
+ def test_max_func(self):
+ pci_info = (('{"address": "0000:0a:00.%s", ' +
+ '"physical_network": "hr_net"}') %
+ (devspec.MAX_FUNC + 1))
+ exc = self.assertRaises(exception.PciDeviceInvalidAddressField,
+ devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI Whitelist: '
+ 'The PCI address 0000:0a:00.%s has an invalid function.'
+ % (devspec.MAX_FUNC + 1))
+ self.assertEqual(msg, six.text_type(exc))
+
+ def test_max_domain(self):
+ pci_info = ('{"address": "%x:0a:00.5", "physical_network":"hr_net"}'
+ % (devspec.MAX_DOMAIN + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid domain %x'
+ % (devspec.MAX_DOMAIN + 1))
+ self.assertEqual(msg, six.text_type(exc))
+
+ def test_max_bus(self):
+ pci_info = ('{"address": "0000:%x:00.5", "physical_network":"hr_net"}'
+ % (devspec.MAX_BUS + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid bus %x'
+ % (devspec.MAX_BUS + 1))
+ self.assertEqual(msg, six.text_type(exc))
+
+ def test_max_slot(self):
+ pci_info = ('{"address": "0000:0a:%x.5", "physical_network":"hr_net"}'
+ % (devspec.MAX_SLOT + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid slot %x'
+ % (devspec.MAX_SLOT + 1))
+ self.assertEqual(msg, six.text_type(exc))
+
+ def test_address_is_undefined(self):
+ pci_info = '{"vendor_id":"8086", "product_id":"5057"}'
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ def test_partial_address(self):
+ pci_info = '{"address":":0a:00.", "physical_network":"hr_net"}'
+ pci = devspec.PciDeviceSpec(pci_info)
+ dev = {"vendor_id": "1137",
+ "product_id": "0071",
+ "address": "0000:0a:00.5",
+ "phys_function": "0000:0a:00.0"}
+ self.assertTrue(pci.match(dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function', return_value=True)
+ def test_address_is_pf(self, mock_is_physical_function):
+ pci_info = '{"address":"0000:0a:00.0", "physical_network":"hr_net"}'
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+
+class PciDevSpecTestCase(test.NoDBTestCase):
+ def test_spec_match(self):
+ pci_info = ('{"vendor_id": "8086","address": "*: *: *.5",' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ def test_invalid_vendor_id(self):
+ pci_info = ('{"vendor_id": "8087","address": "*: *: *.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_vendor_id_out_of_range(self):
+ pci_info = ('{"vendor_id": "80860", "address": "*:*:*.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ devspec.PciDeviceSpec, pci_info)
+ self.assertEqual("Invalid PCI devices Whitelist config "
+ "invalid vendor_id 80860", six.text_type(exc))
+
+ def test_invalid_product_id(self):
+ pci_info = ('{"vendor_id": "8086","address": "*: *: *.5", ' +
+ '"product_id": "5056", "physical_network": "hr_net"}')
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_product_id_out_of_range(self):
+ pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
+ '"product_id": "50570", "physical_network": "hr_net"}')
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ devspec.PciDeviceSpec, pci_info)
+ self.assertEqual("Invalid PCI devices Whitelist config "
+ "invalid product_id 50570", six.text_type(exc))
+
+ def test_devname_and_address(self):
+ pci_info = ('{"devname": "eth0", "vendor_id":"8086", ' +
+ '"address":"*:*:*.5", "physical_network": "hr_net"}')
+ self.assertRaises(exception.PciDeviceInvalidDeviceName,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
+ return_value = ("0000:0a:00.0", True))
+ def test_by_name(self, mock_get_function_by_ifname):
+ pci_info = '{"devname": "eth0", "physical_network": "hr_net"}'
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False))
+ def test_invalid_name(self, mock_get_function_by_ifname):
+ pci_info = '{"devname": "lo", "physical_network": "hr_net"}'
+ exc = self.assertRaises(exception.PciDeviceNotFoundById,
+ devspec.PciDeviceSpec, pci_info)
+ self.assertEqual('PCI device lo not found', six.text_type(exc))
+
+ def test_pci_obj(self):
+ pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+
+ pci = devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.5',
+ 'product_id': '5057',
+ 'vendor_id': '8086',
+ 'status': 'available',
+ 'extra_k1': 'v1',
+ }
+
+ pci_obj = objects.PciDevice.create(pci_dev)
+ self.assertTrue(pci.match_pci_obj(pci_obj))
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
new file mode 100644
index 0000000000..787ea41bd2
--- /dev/null
+++ b/nova/tests/unit/pci/test_manager.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import device
+from nova.pci import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.pci import fakes as pci_fakes
+
+
+fake_pci = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'request_id': None,
+ 'status': 'available'}
+fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
+ product_id='p1', vendor_id='v1')
+fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
+
+
+fake_db_dev = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'available',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ 'request_id': None,
+ }
+fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1',
+ product_id='p1', id=2,
+ address='0000:00:00.2')
+fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3')
+fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2]
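+# Three devices are tracked: two from vendor 'v' (addresses .1 and .3) and
+# one from vendor 'v1' (address .2).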
+
+
+fake_pci_requests = [
+ {'count': 1,
+ 'spec': [{'vendor_id': 'v'}]},
+ {'count': 1,
+ 'spec': [{'vendor_id': 'v1'}]}]
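+# One device from vendor 'v' plus one from vendor 'v1'; claiming both leaves
+# a single free 'v' device out of the three fake devices above.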
+
+
+class PciDevTrackerTestCase(test.TestCase):
+ def _create_fake_instance(self):
+ self.inst = objects.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = objects.PciDeviceList()
+ self.inst.vm_state = vm_states.ACTIVE
+ self.inst.task_state = None
+
+ def _fake_get_pci_devices(self, ctxt, node_id):
+ return fake_db_devs[:]
+
+ def _fake_pci_device_update(self, ctxt, node_id, address, value):
+ self.update_called += 1
+ self.called_values = value
+ fake_return = copy.deepcopy(fake_db_dev)
+ return fake_return
+
+ def _fake_pci_device_destroy(self, ctxt, node_id, address):
+ self.destroy_called += 1
+
+ def _create_pci_requests_object(self, mock_get, requests):
+ pci_reqs = []
+ for request in requests:
+ pci_req_obj = objects.InstancePCIRequest(count=request['count'],
+ spec=request['spec'])
+ pci_reqs.append(pci_req_obj)
+ mock_get.return_value = objects.InstancePCIRequests(requests=pci_reqs)
+
+ def setUp(self):
+ super(PciDevTrackerTestCase, self).setUp()
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_get_pci_devices)
+ # The fake_pci_whitelist must be called before creating the fake
+ # devices
+ patcher = pci_fakes.fake_pci_whitelist()
+ self.addCleanup(patcher.stop)
+ self._create_fake_instance()
+ self.tracker = manager.PciDevTracker(1)
+
+ def test_pcidev_tracker_create(self):
+ self.assertEqual(len(self.tracker.pci_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(self.tracker.stale.keys(), [])
+ self.assertEqual(len(self.tracker.stats.pools), 2)
+ self.assertEqual(self.tracker.node_id, 1)
+
+ def test_pcidev_tracker_create_no_nodeid(self):
+ self.tracker = manager.PciDevTracker()
+ self.assertEqual(len(self.tracker.pci_devs), 0)
+
+ def test_set_hvdev_new_dev(self):
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(len(self.tracker.pci_devs), 4)
+ self.assertEqual(set([dev['address'] for
+ dev in self.tracker.pci_devs]),
+ set(['0000:00:00.1', '0000:00:00.2',
+ '0000:00:00.3', '0000:00:00.4']))
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1', 'v2']))
+
+ def test_set_hvdev_changed(self):
+ fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_v2)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ def test_set_hvdev_remove(self):
+ self.tracker.set_hvdevs([fake_pci])
+ self.assertEqual(len([dev for dev in self.tracker.pci_devs
+ if dev['status'] == 'removed']),
+ 2)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_set_hvdev_changed_stale(self, mock_get):
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker._claim_instance(mock.sentinel.context, self.inst)
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(len(self.tracker.stale), 1)
+ self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_active(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_fail(self, mock_get):
+ pci_requests = copy.deepcopy(fake_pci_requests)
+ pci_requests[0]['count'] = 4
+ self._create_pci_requests_object(mock_get, pci_requests)
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.tracker.update_pci_for_instance,
+ None,
+ self.inst)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_deleted(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.inst.vm_state = vm_states.DELETED
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_resize_source(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.inst.task_state = task_states.RESIZE_MIGRATED
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_resize_dest(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
+ self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
+ self.inst.task_state = task_states.RESIZE_FINISH
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2)
+ self.assertNotIn('fake-inst-uuid', self.tracker.claims)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_migration_in(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_migration_out(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ self.tracker.update_pci_for_migration(None, self.inst, sign=-1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ def test_save(self):
+ self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
+ ctxt = context.get_admin_context()
+ fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_v3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.update_called = 0
+ self.tracker.save(ctxt)
+ self.assertEqual(self.update_called, 3)
+
+ def test_save_removed(self):
+ self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
+ self.stubs.Set(db, "pci_device_destroy", self._fake_pci_device_destroy)
+ self.destroy_called = 0
+ ctxt = context.get_admin_context()
+ self.assertEqual(len(self.tracker.pci_devs), 3)
+ dev = self.tracker.pci_devs[0]
+ self.update_called = 0
+ device.remove(dev)
+ self.tracker.save(ctxt)
+ self.assertEqual(len(self.tracker.pci_devs), 2)
+ self.assertEqual(self.destroy_called, 1)
+
+ def test_set_compute_node_id(self):
+ self.tracker = manager.PciDevTracker()
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_2)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.tracker.set_compute_node_id(1)
+ self.assertEqual(self.tracker.node_id, 1)
+ self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1)
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ for dev in self.tracker.pci_devs:
+ self.assertEqual(dev.compute_node_id, 1)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage(self, mock_get):
+ inst_2 = copy.copy(self.inst)
+ inst_2.uuid = 'uuid5'
+ migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
+
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker.update_pci_for_instance(None, inst_2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ self.tracker.clean_usage([self.inst], [migr], [orph])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
+ self.assertEqual(
+ set([dev['vendor_id'] for dev in free_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage_claims(self, mock_get):
+ inst_2 = copy.copy(self.inst)
+ inst_2.uuid = 'uuid5'
+ migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
+
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker.update_pci_for_migration(None, inst_2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.tracker.clean_usage([self.inst], [migr], [orph])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
+ self.assertEqual(
+ set([dev['vendor_id'] for dev in free_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage_no_request_match_no_claims(self, mock_get):
+ # Tests the case where there is no match for the request, so the
+ # claims mapping for the instance is set to None when the tracker
+ # calls clean_usage.
+ self._create_pci_requests_object(mock_get, [])
+ self.tracker.update_pci_for_migration(None, instance=self.inst, sign=1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
+ self.tracker.clean_usage([], [], [])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
+ self.assertEqual(
+ set([dev['address'] for dev in free_devs]),
+ set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
+
+
+class PciGetInstanceDevs(test.TestCase):
+ def test_get_devs_object(self):
+ def _fake_obj_load_attr(foo, attrname):
+ if attrname == 'pci_devices':
+ self.load_attr_called = True
+ foo.pci_devices = objects.PciDeviceList()
+
+ inst = fakes.stub_instance(id='1')
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'instance_get')
+ db.instance_get(ctxt, '1', columns_to_join=[]
+ ).AndReturn(inst)
+ self.mox.ReplayAll()
+ inst = objects.Instance.get_by_id(ctxt, '1', expected_attrs=[])
+ self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
+
+ self.load_attr_called = False
+ manager.get_instance_pci_devs(inst)
+ self.assertEqual(self.load_attr_called, True)
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
new file mode 100644
index 0000000000..32c768b0c0
--- /dev/null
+++ b/nova/tests/unit/pci/test_request.py
@@ -0,0 +1,209 @@
+# Copyright 2013 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for PCI request."""
+
+from nova import exception
+from nova.pci import request
+from nova import test
+
+
+_fake_alias1 = """{
+ "name": "QuicAssist",
+ "capability_type": "pci",
+ "product_id": "4443",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }"""
+
+_fake_alias11 = """{
+ "name": "QuicAssist",
+ "capability_type": "pci",
+ "product_id": "4444",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }"""
+
+_fake_alias2 = """{
+ "name": "xxx",
+ "capability_type": "pci",
+ "product_id": "1111",
+ "vendor_id": "1111",
+ "device_type": "N"
+ }"""
+
+_fake_alias3 = """{
+ "name": "IntelNIC",
+ "capability_type": "pci",
+ "product_id": "1111",
+ "vendor_id": "8086",
+ "device_type": "NIC"
+ }"""
+
+
+class AliasTestCase(test.NoDBTestCase):
+ def test_good_alias(self):
+ self.flags(pci_alias=[_fake_alias1])
+ als = request._get_alias_from_config()
+ self.assertIsInstance(als['QuicAssist'], list)
+ expect_dict = {
+ "capability_type": "pci",
+ "product_id": "4443",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }
+ self.assertEqual(expect_dict, als['QuicAssist'][0])
+
+ def test_multispec_alias(self):
+ self.flags(pci_alias=[_fake_alias1, _fake_alias11])
+ als = request._get_alias_from_config()
+ self.assertIsInstance(als['QuicAssist'], list)
+ expect_dict1 = {
+ "capability_type": "pci",
+ "product_id": "4443",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }
+ expect_dict2 = {
+ "capability_type": "pci",
+ "product_id": "4444",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }
+
+ self.assertEqual(expect_dict1, als['QuicAssist'][0])
+ self.assertEqual(expect_dict2, als['QuicAssist'][1])
+
+ def test_wrong_type_alias(self):
+ self.flags(pci_alias=[_fake_alias2])
+ self.assertRaises(exception.PciInvalidAlias,
+ request._get_alias_from_config)
+
+ def test_wrong_product_id_alias(self):
+ self.flags(pci_alias=[
+ """{
+ "name": "xxx",
+ "capability_type": "pci",
+ "product_id": "g111",
+ "vendor_id": "1111",
+ "device_type": "NIC"
+ }"""])
+ self.assertRaises(exception.PciInvalidAlias,
+ request._get_alias_from_config)
+
+ def test_wrong_vendor_id_alias(self):
+ self.flags(pci_alias=[
+ """{
+ "name": "xxx",
+ "capability_type": "pci",
+ "product_id": "1111",
+ "vendor_id": "0xg111",
+ "device_type": "NIC"
+ }"""])
+ self.assertRaises(exception.PciInvalidAlias,
+ request._get_alias_from_config)
+
+ def test_wrong_cap_type_alias(self):
+ self.flags(pci_alias=[
+ """{
+ "name": "xxx",
+ "capability_type": "usb",
+ "product_id": "1111",
+ "vendor_id": "8086",
+ "device_type": "NIC"
+ }"""])
+ self.assertRaises(exception.PciInvalidAlias,
+ request._get_alias_from_config)
+
+ def test_dup_alias(self):
+ self.flags(pci_alias=[
+ """{
+ "name": "xxx",
+ "capability_type": "pci",
+ "product_id": "1111",
+ "vendor_id": "8086",
+ "device_type": "NIC"
+ }""",
+ """{
+ "name": "xxx",
+ "capability_type": "pci",
+ "product_id": "1111",
+ "vendor_id": "8086",
+ "device_type": "ACCEL"
+ }"""])
+ self.assertRaises(
+ exception.PciInvalidAlias,
+ request._get_alias_from_config)
+
+ def _verify_result(self, expected, real):
+ exp_real = zip(expected, real)
+ for exp, real in exp_real:
+ self.assertEqual(exp['count'], real.count)
+ self.assertEqual(exp['alias_name'], real.alias_name)
+ self.assertEqual(exp['spec'], real.spec)
+
+ def test_alias_2_request(self):
+ self.flags(pci_alias=[_fake_alias1, _fake_alias3])
+ expect_request = [
+ {'count': 3,
+ 'spec': [{'vendor_id': '8086', 'product_id': '4443',
+ 'device_type': 'ACCEL',
+ 'capability_type': 'pci'}],
+ 'alias_name': 'QuicAssist'},
+
+ {'count': 1,
+ 'spec': [{'vendor_id': '8086', 'product_id': '1111',
+ 'device_type': "NIC",
+ 'capability_type': 'pci'}],
+ 'alias_name': 'IntelNIC'}, ]
+
+ requests = request._translate_alias_to_requests(
+ "QuicAssist : 3, IntelNIC: 1")
+ self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
+ self._verify_result(expect_request, requests)
+
+ def test_alias_2_request_invalid(self):
+ self.flags(pci_alias=[_fake_alias1, _fake_alias3])
+ self.assertRaises(exception.PciRequestAliasNotDefined,
+ request._translate_alias_to_requests,
+ "QuicAssistX : 3")
+
+ def test_get_pci_requests_from_flavor(self):
+ self.flags(pci_alias=[_fake_alias1, _fake_alias3])
+ expect_request = [
+ {'count': 3,
+ 'spec': [{'vendor_id': '8086', 'product_id': '4443',
+ 'device_type': "ACCEL",
+ 'capability_type': 'pci'}],
+ 'alias_name': 'QuicAssist'},
+
+ {'count': 1,
+ 'spec': [{'vendor_id': '8086', 'product_id': '1111',
+ 'device_type': "NIC",
+ 'capability_type': 'pci'}],
+ 'alias_name': 'IntelNIC'}, ]
+
+ flavor = {'extra_specs': {"pci_passthrough:alias":
+ "QuicAssist:3, IntelNIC: 1"}}
+ requests = request.get_pci_requests_from_flavor(flavor)
+ self.assertEqual(set([1, 3]),
+ set([p.count for p in requests.requests]))
+ self._verify_result(expect_request, requests.requests)
+
+ def test_get_pci_requests_from_flavor_no_extra_spec(self):
+ self.flags(pci_alias=[_fake_alias1, _fake_alias3])
+ flavor = {}
+ requests = request.get_pci_requests_from_flavor(flavor)
+ self.assertEqual([], requests.requests)
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
new file mode 100644
index 0000000000..6960cf93cf
--- /dev/null
+++ b/nova/tests/unit/pci/test_stats.py
@@ -0,0 +1,267 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import exception
+from nova import objects
+from nova.pci import stats
+from nova.pci import whitelist
+from nova import test
+from nova.tests.unit.pci import fakes
+
+fake_pci_1 = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'product_id': 'p1',
+ 'vendor_id': 'v1',
+ 'status': 'available',
+ 'extra_k1': 'v1',
+ 'request_id': None,
+ }
+
+
+fake_pci_2 = dict(fake_pci_1, vendor_id='v2',
+ product_id='p2',
+ address='0000:00:00.2')
+
+
+fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3')
+
+
+pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v1'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v2'}])]
+
+
+pci_requests_multiple = [objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v1'}]),
+ objects.InstancePCIRequest(count=3,
+ spec=[{'vendor_id': 'v2'}])]
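+# pci_requests (one 'v1' device plus one 'v2' device) can be satisfied by the
+# three fake devices above; pci_requests_multiple asks for three 'v2' devices
+# while only one exists, so it cannot.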
+
+
+class PciDeviceStatsTestCase(test.NoDBTestCase):
+ def _create_fake_devs(self):
+ self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
+ self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
+ self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
+
+ map(self.pci_stats.add_device,
+ [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3])
+
+ def setUp(self):
+ super(PciDeviceStatsTestCase, self).setUp()
+ self.pci_stats = stats.PciDeviceStats()
+ # The following two calls need to be made before adding the devices.
+ patcher = fakes.fake_pci_whitelist()
+ self.addCleanup(patcher.stop)
+ self._create_fake_devs()
+
+ def test_add_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
+ set(['v1', 'v2']))
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set([1, 2]))
+
+ def test_remove_device(self):
+ self.pci_stats.remove_device(self.fake_dev_2)
+ self.assertEqual(len(self.pci_stats.pools), 1)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+
+ def test_remove_device_exception(self):
+ self.pci_stats.remove_device(self.fake_dev_2)
+ self.assertRaises(exception.PciDevicePoolEmpty,
+ self.pci_stats.remove_device,
+ self.fake_dev_2)
+
+ def test_json_create(self):
+ m = jsonutils.dumps(self.pci_stats)
+ new_stats = stats.PciDeviceStats(m)
+
+ self.assertEqual(len(new_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in new_stats]),
+ set([1, 2]))
+ self.assertEqual(set([d['vendor_id'] for d in new_stats]),
+ set(['v1', 'v2']))
+
+ def test_support_requests(self):
+ self.assertEqual(self.pci_stats.support_requests(pci_requests),
+ True)
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set((1, 2)))
+
+ def test_support_requests_failed(self):
+ self.assertEqual(
+ self.pci_stats.support_requests(pci_requests_multiple), False)
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set([1, 2]))
+
+ def test_apply_requests(self):
+ self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 1)
+ self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
+
+ def test_apply_requests_failed(self):
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ pci_requests_multiple)
+
+ def test_consume_requests(self):
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['v1', 'v2']),
+ set([dev['vendor_id'] for dev in devs]))
+
+ def test_consume_requests_empty(self):
+ devs = self.pci_stats.consume_requests([])
+ self.assertEqual(0, len(devs))
+
+ def test_consume_requests_failed(self):
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple)
+
+
+@mock.patch.object(whitelist, 'get_pci_devices_filter')
+class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(PciDeviceStatsWithTagsTestCase, self).setUp()
+ self.pci_stats = stats.PciDeviceStats()
+ self._create_whitelist()
+
+ def _create_whitelist(self):
+ white_list = ['{"vendor_id":"1137","product_id":"0071",'
+ '"address":"*:0a:00.*","physical_network":"physnet1"}',
+ '{"vendor_id":"1137","product_id":"0072"}']
+ self.pci_wlist = whitelist.PciHostDevicesWhiteList(white_list)
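+ # The first entry is tagged with physical_network and pinned to bus 0a,
+ # slot 00; the second matches any 1137/0072 device with no tags, so the
+ # devices created below land in two separate pools.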
+
+ def _create_pci_devices(self):
+ self.pci_tagged_devices = []
+ for dev in range(4):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0a:00.%d' % dev,
+ 'vendor_id': '1137',
+ 'product_id': '0071',
+ 'status': 'available',
+ 'request_id': None}
+ self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
+
+ self.pci_untagged_devices = []
+ for dev in range(3):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0b:00.%d' % dev,
+ 'vendor_id': '1137',
+ 'product_id': '0072',
+ 'status': 'available',
+ 'request_id': None}
+ self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
+
+ map(self.pci_stats.add_device, self.pci_tagged_devices)
+ map(self.pci_stats.add_device, self.pci_untagged_devices)
+
+ def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
+ self.assertEqual(vendor_id, pool['vendor_id'])
+ self.assertEqual(product_id, pool['product_id'])
+ self.assertEqual(count, pool['count'])
+ if tags:
+ for k, v in tags.iteritems():
+ self.assertEqual(v, pool[k])
+
+ def _assertPools(self):
+ # Pools are ordered based on the number of keys. 'product_id',
+ # 'vendor_id' are always part of the keys. When tags are present,
+ # they are also part of the keys. In this test class, we have
+ # two pools with the second one having the tag 'physical_network'
+ # and the value 'physnet1'
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
+ len(self.pci_untagged_devices))
+ self.assertEqual(self.pci_untagged_devices,
+ self.pci_stats.pools[0]['devices'])
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
+ len(self.pci_tagged_devices),
+ physical_network='physnet1')
+ self.assertEqual(self.pci_tagged_devices,
+ self.pci_stats.pools[1]['devices'])
+
+ def test_add_devices(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ self._assertPools()
+
+ def test_consume_requests(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'physical_network': 'physnet1'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '1137',
+ 'product_id': '0072'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['0071', '0072']),
+ set([dev['product_id'] for dev in devs]))
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ physical_network='physnet1')
+
+ def test_add_device_no_devspec(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '2345',
+ 'product_id': '0172',
+ 'status': 'available',
+ 'request_id': None}
+ pci_dev_obj = objects.PciDevice.create(pci_dev)
+ self.pci_stats.add_device(pci_dev_obj)
+ # There should be no change
+ self.assertIsNone(
+ self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
+ self._assertPools()
+
+ def test_remove_device_no_devspec(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '2345',
+ 'product_id': '0172',
+ 'status': 'available',
+ 'request_id': None}
+ pci_dev_obj = objects.PciDevice.create(pci_dev)
+ self.pci_stats.remove_device(pci_dev_obj)
+ # There should be no change
+ self.assertIsNone(
+ self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
+ self._assertPools()
+
+ def test_remove_device(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ dev1 = self.pci_untagged_devices.pop()
+ self.pci_stats.remove_device(dev1)
+ dev2 = self.pci_tagged_devices.pop()
+ self.pci_stats.remove_device(dev2)
+ self._assertPools()
diff --git a/nova/tests/unit/pci/test_utils.py b/nova/tests/unit/pci/test_utils.py
new file mode 100644
index 0000000000..77a0ce24f5
--- /dev/null
+++ b/nova/tests/unit/pci/test_utils.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2013 Intel, Inc.
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova.pci import utils
+from nova import test
+
+
+class PciDeviceMatchTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PciDeviceMatchTestCase, self).setUp()
+ self.fake_pci_1 = {'vendor_id': 'v1',
+ 'device_id': 'd1'}
+
+ def test_single_spec_match(self):
+ self.assertTrue(utils.pci_device_prop_match(
+ self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
+
+ def test_multiple_spec_match(self):
+ self.assertTrue(utils.pci_device_prop_match(
+ self.fake_pci_1,
+ [{'vendor_id': 'v1', 'device_id': 'd1'},
+ {'vendor_id': 'v3', 'device_id': 'd3'}]))
+
+ def test_spec_mismatch(self):
+ self.assertFalse(utils.pci_device_prop_match(
+ self.fake_pci_1,
+ [{'vendor_id': 'v4', 'device_id': 'd4'},
+ {'vendor_id': 'v3', 'device_id': 'd3'}]))
+
+ def test_spec_extra_key(self):
+ self.assertFalse(utils.pci_device_prop_match(
+ self.fake_pci_1,
+ [{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))
+
+
+class PciDeviceAddressParserTestCase(test.NoDBTestCase):
+ def test_parse_address(self):
+ self.parse_result = utils.parse_address("0000:04:12.6")
+ self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
+
+ def test_parse_address_wrong(self):
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ utils.parse_address, "0000:04.12:6")
+
+ def test_parse_address_invalid_character(self):
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ utils.parse_address, "0000:h4.12:6")
diff --git a/nova/tests/unit/pci/test_whitelist.py b/nova/tests/unit/pci/test_whitelist.py
new file mode 100644
index 0000000000..cb5891dffb
--- /dev/null
+++ b/nova/tests/unit/pci/test_whitelist.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.pci import whitelist
+from nova import test
+
+
+dev_dict = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:0a.1',
+ 'product_id': '0001',
+ 'vendor_id': '8086',
+ 'status': 'available',
+ 'phys_function': '0000:00:0a.0',
+ }
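+# Device record used by the assignable checks below; it matches the
+# 8086/0001 whitelist entry used throughout this module.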
+
+
+class PciHostDevicesWhiteListTestCase(test.NoDBTestCase):
+ def test_whitelist(self):
+ white_list = '{"product_id":"0001", "vendor_id":"8086"}'
+ parsed = whitelist.PciHostDevicesWhiteList([white_list])
+ self.assertEqual(1, len(parsed.specs))
+
+ def test_whitelist_empty(self):
+ parsed = whitelist.PciHostDevicesWhiteList()
+ self.assertFalse(parsed.device_assignable(dev_dict))
+
+ def test_whitelist_multiple(self):
+ wl1 = '{"product_id":"0001", "vendor_id":"8086"}'
+ wl2 = '{"product_id":"0002", "vendor_id":"8087"}'
+ parsed = whitelist.PciHostDevicesWhiteList([wl1, wl2])
+ self.assertEqual(2, len(parsed.specs))
+
+ def test_device_assignable(self):
+ white_list = '{"product_id":"0001", "vendor_id":"8086"}'
+ parsed = whitelist.PciHostDevicesWhiteList([white_list])
+ self.assertIsNotNone(parsed.device_assignable(dev_dict))
+
+ def test_device_assignable_multiple(self):
+ white_list_1 = '{"product_id":"0001", "vendor_id":"8086"}'
+ white_list_2 = '{"product_id":"0002", "vendor_id":"8087"}'
+ parsed = whitelist.PciHostDevicesWhiteList(
+ [white_list_1, white_list_2])
+ self.assertIsNotNone(parsed.device_assignable(dev_dict))
+ dev_dict1 = dev_dict.copy()
+ dev_dict1['vendor_id'] = '8087'
+ dev_dict1['product_id'] = '0002'
+ self.assertIsNotNone(parsed.device_assignable(dev_dict1))
+
+ def test_get_pci_devices_filter(self):
+ white_list_1 = '{"product_id":"0001", "vendor_id":"8086"}'
+ self.flags(pci_passthrough_whitelist=[white_list_1])
+ pci_filter = whitelist.get_pci_devices_filter()
+ self.assertIsNotNone(pci_filter.device_assignable(dev_dict))
diff --git a/nova/tests/unit/policy_fixture.py b/nova/tests/unit/policy_fixture.py
new file mode 100644
index 0000000000..cf28875240
--- /dev/null
+++ b/nova/tests/unit/policy_fixture.py
@@ -0,0 +1,73 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.openstack.common import policy as common_policy
+import nova.policy
+from nova.tests.unit import fake_policy
+
+CONF = cfg.CONF
+
+
+class PolicyFixture(fixtures.Fixture):
+
+ def setUp(self):
+ super(PolicyFixture, self).setUp()
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ policy_file.write(fake_policy.policy_data)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
+
+ def set_rules(self, rules):
+ policy = nova.policy._ENFORCER
+ policy.set_rules(dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items()))
+
+
+class RoleBasedPolicyFixture(fixtures.Fixture):
+
+ def __init__(self, role="admin", *args, **kwargs):
+ super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
+ self.role = role
+
+ def setUp(self):
+ """Copy live policy.json file and convert all actions to
+ allow users of the specified role only
+ """
+ super(RoleBasedPolicyFixture, self).setUp()
+ policy = jsonutils.load(open(CONF.policy_file))
+
+ # Convert all actions to require specified role
+ for action, rule in policy.iteritems():
+ policy[action] = 'role:%s' % self.role
+
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ jsonutils.dump(policy, policy_file)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
diff --git a/nova/tests/unit/scheduler/__init__.py b/nova/tests/unit/scheduler/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/__init__.py
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
new file mode 100644
index 0000000000..d1b2918d33
--- /dev/null
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -0,0 +1,268 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes for scheduler tests.
+"""
+
+import mox
+from oslo.serialization import jsonutils
+
+from nova.compute import vm_states
+from nova import db
+from nova.scheduler import filter_scheduler
+from nova.scheduler import host_manager
+from nova.virt import hardware
+
+NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(
+ 0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellUsage(
+ 1, set([3, 4]), 512)])
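+# Two-cell host NUMA topology (two CPUs and 512 MB of memory per cell); only
+# host3 in COMPUTE_NODES below reports it.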
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=None, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=NUMA_TOPOLOGY.to_json()),
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8888, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
+]
+
+COMPUTE_NODES_METRICS = [
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=512, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 512,
+ 'timestamp': None,
+ 'source': 'host1'
+ },
+ {'name': 'bar',
+ 'value': 1.0,
+ 'timestamp': None,
+ 'source': 'host1'
+ },
+ ])),
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 1024,
+ 'timestamp': None,
+ 'source': 'host2'
+ },
+ {'name': 'bar',
+ 'value': 2.0,
+ 'timestamp': None,
+ 'source': 'host2'
+ },
+ ])),
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 3072,
+ 'timestamp': None,
+ 'source': 'host3'
+ },
+ {'name': 'bar',
+ 'value': 1.0,
+ 'timestamp': None,
+ 'source': 'host3'
+ },
+ ])),
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8192, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 8192,
+ 'timestamp': None,
+ 'source': 'host4'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host4'
+ },
+ ])),
+ dict(id=5, local_gb=768, memory_mb=768, vcpus=8,
+ disk_available_least=768, free_ram_mb=768, vcpus_used=0,
+ free_disk_gb=768, local_gb_used=0, updated_at=None,
+ service=dict(host='host5', disabled=False),
+ hypervisor_hostname='node5', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 768,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ {'name': 'zot',
+ 'value': 1,
+ 'timestamp': None,
+ 'source': 'host5'
+ },
+ ])),
+ dict(id=6, local_gb=2048, memory_mb=2048, vcpus=8,
+ disk_available_least=2048, free_ram_mb=2048, vcpus_used=0,
+ free_disk_gb=2048, local_gb_used=0, updated_at=None,
+ service=dict(host='host6', disabled=False),
+ hypervisor_hostname='node6', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ metrics=jsonutils.dumps([{'name': 'foo',
+ 'value': 2048,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ {'name': 'bar',
+ 'value': 0,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ {'name': 'zot',
+ 'value': 2,
+ 'timestamp': None,
+ 'source': 'host6'
+ },
+ ])),
+]
+
+INSTANCES = [
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host1', node='node1'),
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host2', node='node2'),
+ dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
+ host='host2', node='node2'),
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host='host3', node='node3'),
+ # Broken host
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host=None),
+ # No matching host
+ dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
+ host='host5', node='node5'),
+]
+
+
+class FakeFilterScheduler(filter_scheduler.FilterScheduler):
+ def __init__(self, *args, **kwargs):
+ super(FakeFilterScheduler, self).__init__(*args, **kwargs)
+ self.host_manager = host_manager.HostManager()
+
+
+class FakeHostManager(host_manager.HostManager):
+ """host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
+ host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
+ host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
+ host4: free_ram_mb=8192 free_disk_gb=8192
+ """
+
+ def __init__(self):
+ super(FakeHostManager, self).__init__()
+
+ self.service_states = {
+ 'host1': {
+ 'compute': {'host_memory_free': 1073741824},
+ },
+ 'host2': {
+ 'compute': {'host_memory_free': 2147483648},
+ },
+ 'host3': {
+ 'compute': {'host_memory_free': 3221225472},
+ },
+ 'host4': {
+ 'compute': {'host_memory_free': 999999999},
+ },
+ }
+
+
+class FakeHostState(host_manager.HostState):
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
+ for (key, val) in attribute_dict.iteritems():
+ setattr(self, key, val)
+
+
+class FakeInstance(object):
+ def __init__(self, context=None, params=None):
+ """Create a test instance. Returns uuid."""
+ self.context = context
+
+ i = self._create_fake_instance(params=params)
+ self.uuid = i['uuid']
+
+ def _create_fake_instance(self, params=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = 1
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type_id'] = 2
+ inst['ami_launch_index'] = 0
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+
+class FakeComputeAPI(object):
+ def create_db_entry_for_new_instance(self, *args, **kwargs):
+ pass
+
+
+def mox_host_manager_db_calls(mock, context):
+ mock.StubOutWithMock(db, 'compute_node_get_all')
+
+ db.compute_node_get_all(mox.IgnoreArg()).AndReturn(COMPUTE_NODES)
diff --git a/nova/tests/unit/scheduler/filters/__init__.py b/nova/tests/unit/scheduler/filters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/__init__.py
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
new file mode 100644
index 0000000000..d47d10a57d
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.scheduler.filters import affinity_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestDifferentHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDifferentHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.DifferentHostFilter()
+
+ def test_affinity_different_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.instances]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestSameHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSameHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.SameHostFilter()
+
+ def test_affinity_same_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSimpleCIDRAffinityFilter, self).setUp()
+ self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
+
+ def test_affinity_simple_cidr_filter_passes(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/24',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_fails(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/32',
+ 'build_near_host_ip': affinity_ip}}
+
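+        # A /32 mask matches only the exact affinity IP, so 10.8.1.1
+        # falls outside 10.8.1.100/32 and the host is rejected.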
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_handles_none(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ affinity_ip = CONF.my_ip.split('.')[0:3]
+ affinity_ip.append('100')
+ affinity_ip = str.join('.', affinity_ip)
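+        # affinity_ip is built but never used below; with scheduler_hints
+        # set to None the filter should pass without consulting any hint.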
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+
+class TestGroupAffinityFilter(test.NoDBTestCase):
+
+ def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': [policy]}
+ filter_properties['group_hosts'] = []
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties['group_hosts'] = ['host2']
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_passes_legacy(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_fails_legacy(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['anti-affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity'],
+ 'group_hosts': ['host1']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_passes(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_passes_legacy(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
+
+ def _test_group_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host2']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_fails(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_fails_legacy(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
new file mode 100644
index 0000000000..b4eacf321f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -0,0 +1,98 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggImagePropsIsolationFilter, self).setUp()
+ self.filt_cls = aipi.AggregateImagePropertiesIsolation()
+
+ def test_aggregate_image_properties_isolation_passes(self, agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_multi_props_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar2'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
+ agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'no-bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_namespace(self,
+ agg_mock):
+ self.flags(aggregate_image_properties_isolation_namespace="np")
+ agg_mock.return_value = {'np.foo': 'bar', 'foo2': 'bar2'}
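+        # Only aggregate metadata keys under the configured "np" namespace
+        # are compared, so the mismatching 'foo2' value is ignored.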
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'np.foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
new file mode 100644
index 0000000000..4512841062
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -0,0 +1,72 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
+ self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
+
+ def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
+ capabilities = {'opt1': 1, 'opt2': 2}
+
+ filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
+ {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(agg_mock.called)
+
+ def _do_test_aggregate_filter_extra_specs(self, especs, passes):
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ # Un-scoped extra spec
+ 'opt1': '1',
+ # Scoped extra spec that applies to this filter
+ 'aggregate_instance_extra_specs:opt2': '2',
+ # Scoped extra spec that does not apply to this filter
+ 'trust:trusted_host': 'true',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
+ agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
+ especs = {
+ # Un-scoped extra spec, make sure we don't blow up if it
+ # happens to match our scope.
+ 'aggregate_instance_extra_specs': '1',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ 'opt1': '1',
+ 'opt2': '222',
+ 'trust:trusted_host': 'true'
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=False)
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
new file mode 100644
index 0000000000..70fe5e2d41
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateMultitenancyIsolationFilter, self).setUp()
+ self.filt_cls = ami.AggregateMultiTenancyIsolation()
+
+ def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'my_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'other_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_no_meta_passes(self, agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
new file mode 100644
index 0000000000..3cf860dfb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import availability_zone_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAvailabilityZoneFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAvailabilityZoneFilter, self).setUp()
+ self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
+
+ @staticmethod
+ def _make_zone_request(zone):
+ return {
+ 'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'availability_zone': zone
+ }
+ }
+ }
+
+ def test_availability_zone_filter_same(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('nova')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, request))
+
+ def test_availability_zone_filter_different(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('bad')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, request))
diff --git a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
new file mode 100644
index 0000000000..506b207d2a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
@@ -0,0 +1,99 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from nova.scheduler.filters import compute_capabilities_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestComputeCapabilitiesFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestComputeCapabilitiesFilter, self).setUp()
+ self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()
+
+ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
+        # In a real OpenStack runtime environment, compute capability
+        # values may be numbers, so use numbers in this unit test.
+ capabilities = {}
+ capabilities.update(ecaps)
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'extra_specs': especs}}
+ host_state = {'free_ram_mb': 1024}
+ host_state.update(capabilities)
+ host = fakes.FakeHostState('host1', 'node1', host_state)
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_pass_cpu_info_as_text_type(self):
+ cpu_info = """ { "vendor": "Intel", "model": "core2duo",
+ "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
+ {"cores": 1, "threads":1, "sockets": 1}} """
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=True)
+
+ def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
+ cpu_info = "cpu_info"
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=False)
+
+ def test_compute_filter_passes_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_fails_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
+ passes=False)
+
+ def test_compute_filter_pass_extra_specs_simple_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'capabilities:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_pass_extra_specs_same_as_scope(self):
+ # Make sure this still works even if the key is the same as the scope
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'capabilities': 1},
+ especs={'capabilities': '1'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 1, 'opt2': 2},
+ especs={'wrong_scope:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
+ especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
+ 'trust:trusted_host': 'true'},
+ passes=True)
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
new file mode 100644
index 0000000000..7e31e1ef5a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.servicegroup.API.service_is_up')
+class TestComputeFilter(test.NoDBTestCase):
+
+ def test_compute_filter_manual_disable(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': True}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
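+        # A manually disabled service short-circuits the filter, so
+        # service_is_up is never consulted.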
+ self.assertFalse(service_up_mock.called)
+
+ def test_compute_filter_sgapi_passes(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ service_up_mock.return_value = True
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ service_up_mock.assert_called_once_with(service)
+
+ def test_compute_filter_sgapi_fails(self, service_up_mock):
+ filt_cls = compute_filter.ComputeFilter()
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ service = {'disabled': False, 'updated_at': 'now'}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'service': service})
+ service_up_mock.return_value = False
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ service_up_mock.assert_called_once_with(service)
diff --git a/nova/tests/unit/scheduler/filters/test_core_filters.py b/nova/tests/unit/scheduler/filters/test_core_filters.py
new file mode 100644
index 0000000000..cfe2c51be6
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_core_filters.py
@@ -0,0 +1,87 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import core_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestCoreFilter(test.NoDBTestCase):
+
+ def test_core_filter_passes(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 7})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_core_filter_fails_safe(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_core_filter_fails(self):
+ self.filt_cls = core_filter.CoreFilter()
+ filter_properties = {'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_value_error(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 7})
+ agg_mock.return_value = set(['XXX'])
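+        # 'XXX' is not a valid ratio, so the filter is expected to fall
+        # back to the cpu_allocation_ratio flag (2), i.e. a limit of 8.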
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'cpu_allocation_ratio')
+ self.assertEqual(4 * 2, host.limits['vcpu'])
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_default_value(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ agg_mock.return_value = set([])
+ # False: fallback to default flag w/o aggregates
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'cpu_allocation_ratio')
+ # True: use ratio from aggregates
+ agg_mock.return_value = set(['3'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 3, host.limits['vcpu'])
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_core_filter_conflict_values(self, agg_mock):
+ self.filt_cls = core_filter.AggregateCoreFilter()
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'vcpus': 1}}
+ self.flags(cpu_allocation_ratio=1)
+ host = fakes.FakeHostState('host1', 'node1',
+ {'vcpus_total': 4, 'vcpus_used': 8})
+ agg_mock.return_value = set(['2', '3'])
+ # use the minimum ratio from aggregates
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(4 * 2, host.limits['vcpu'])
diff --git a/nova/tests/unit/scheduler/filters/test_disk_filters.py b/nova/tests/unit/scheduler/filters/test_disk_filters.py
new file mode 100644
index 0000000000..14e9328732
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_disk_filters.py
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import disk_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestDiskFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDiskFilter, self).setUp()
+
+ def test_disk_filter_passes(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1, 'swap': 512}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_fails(self):
+ self.flags(disk_allocation_ratio=1.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 10,
+ 'ephemeral_gb': 1, 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_disk_filter_oversubscribe(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 18, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(12 * 10.0, host.limits['disk_gb'])
+
+ def test_disk_filter_oversubscribe_fail(self):
+ self.flags(disk_allocation_ratio=10.0)
+ filt_cls = disk_filter.DiskFilter()
+ filter_properties = {'instance_type': {'root_gb': 100,
+ 'ephemeral_gb': 19, 'swap': 1024}}
+ # 1GB used... so 119GB allowed...
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_disk_filter_value_error(self, agg_mock):
+ filt_cls = disk_filter.AggregateDiskFilter()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': mock.sentinel.ctx,
+ 'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1})
+ agg_mock.return_value = set(['XXX'])
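+        # An unparsable aggregate value should make the filter fall back
+        # to the disk_allocation_ratio flag.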
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'disk_allocation_ratio')
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_disk_filter_default_value(self, agg_mock):
+ filt_cls = disk_filter.AggregateDiskFilter()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': mock.sentinel.ctx,
+ 'instance_type': {'root_gb': 2,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1})
+ # Uses global conf.
+ agg_mock.return_value = set([])
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'disk_allocation_ratio')
+
+ agg_mock.return_value = set(['2'])
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
new file mode 100644
index 0000000000..5f8f912a81
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
@@ -0,0 +1,200 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import extra_specs_ops
+from nova import test
+
+
+class ExtraSpecsOpsTestCase(test.NoDBTestCase):
+ def _do_extra_specs_ops_test(self, value, req, matches):
+ assertion = self.assertTrue if matches else self.assertFalse
+ assertion(extra_specs_ops.match(value, req))
+
+ def test_extra_specs_matches_simple(self):
+ self._do_extra_specs_ops_test(
+ value='1',
+ req='1',
+ matches=True)
+
+ def test_extra_specs_fails_simple(self):
+ self._do_extra_specs_ops_test(
+ value='',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple2(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='1',
+ matches=False)
+
+ def test_extra_specs_fails_simple3(self):
+ self._do_extra_specs_ops_test(
+ value='222',
+ req='2',
+ matches=False)
+
+ def test_extra_specs_fails_with_bogus_ops(self):
+ self._do_extra_specs_ops_test(
+ value='4',
+ req='> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_eq2(self):
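+        # extra_specs_ops interprets a bare '=' numerically as '>=', so a
+        # value of 124 satisfies the requirement '= 123'.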
+ self._do_extra_specs_ops_test(
+ value='124',
+ req='= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_eq(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_eq3(self):
+ self._do_extra_specs_ops_test(
+ value='34',
+ req='=',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s== 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_seq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s== 123',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s!= 123',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_sneq(self):
+ self._do_extra_specs_ops_test(
+ value='123',
+ req='s!= 123',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sge(self):
+ self._do_extra_specs_ops_test(
+ value='1000',
+ req='s>= 234',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sle(self):
+ self._do_extra_specs_ops_test(
+ value='1234',
+ req='s<= 1000',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sl(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='s< 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_sg(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='s> 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 11',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_in3(self):
+ self._do_extra_specs_ops_test(
+ value='12311321',
+ req='<in> 12311321 <in>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_in(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_in2(self):
+ self._do_extra_specs_ops_test(
+ value='12310321',
+ req='<in> 11 <in>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12',
+ matches=True)
+
+ def test_extra_specs_matches_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='12',
+ req='<or> 11 <or> 12 <or>',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_or(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12',
+ matches=False)
+
+ def test_extra_specs_fails_with_op_or2(self):
+ self._do_extra_specs_ops_test(
+ value='13',
+ req='<or> 11 <or> 12 <or>',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='<= 10',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_le(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='<= 2',
+ matches=False)
+
+ def test_extra_specs_matches_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='3',
+ req='>= 1',
+ matches=True)
+
+ def test_extra_specs_fails_with_op_ge(self):
+ self._do_extra_specs_ops_test(
+ value='2',
+ req='>= 3',
+ matches=False)
diff --git a/nova/tests/unit/scheduler/filters/test_image_props_filters.py b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
new file mode 100644
index 0000000000..ee3a175dce
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
@@ -0,0 +1,189 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import arch
+from nova.compute import hvtype
+from nova.compute import vm_mode
+from nova.scheduler.filters import image_props_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+
+
+class TestImagePropsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestImagePropsFilter, self).setUp()
+ self.filt_cls = image_props_filter.ImagePropertiesFilter()
+
+ def test_image_properties_filter_passes_same_inst_props_and_version(self):
+        img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0,<6.2'
+ }}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_different_inst_props(self):
+ img_props = {'properties': {'architecture': arch.ARMV7,
+ 'hypervisor_type': hvtype.QEMU,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_different_hyper_version(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.2'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_partial_inst_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_partial_inst_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_without_inst_props(self):
+ filter_properties = {'request_spec': {}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_without_host_props(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'enabled': True,
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_passes_without_hyper_version(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)]}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
+ img_props = {'properties': {'architecture': arch.X86_64,
+ 'hypervisor_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM,
+ 'hypervisor_version_requires': '>=6.0'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ capabilities = {'enabled': True,
+ 'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': 5000}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
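+        # A hypervisor_version of 5000 is well below the integer encoding
+        # of version 6.0, so the '>=6.0' requirement cannot be met.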
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_pv_mode_compat(self):
+ # if an old image has 'pv' for a vm_mode it should be treated as xen
+ img_props = {'properties': {'vm_mode': 'pv'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_hvm_mode_compat(self):
+        # if an old image has 'hv' for a vm_mode it should be treated as hvm
+ img_props = {'properties': {'vm_mode': 'hv'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_xen_arch_compat(self):
+ # if an old image has 'x86_32' for arch it should be treated as i686
+ img_props = {'properties': {'architecture': 'x86_32'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.KVM, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_xen_hvtype_compat(self):
+ # if an old image has 'xapi' for hvtype it should be treated as xen
+ img_props = {'properties': {'hypervisor_type': 'xapi'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.XEN, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_image_properties_filter_baremetal_vmmode_compat(self):
+ # if an old image has 'baremetal' for vmmode it should be
+ # treated as hvm
+ img_props = {'properties': {'vm_mode': 'baremetal'}}
+ filter_properties = {'request_spec': {'image': img_props}}
+ hypervisor_version = utils.convert_version_to_int('6.0.0')
+ capabilities = {'supported_instances':
+ [(arch.I686, hvtype.BAREMETAL, vm_mode.HVM)],
+ 'hypervisor_version': hypervisor_version}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
new file mode 100644
index 0000000000..c558b7711f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova.scheduler.filters import io_ops_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestIoOpsFilter(test.NoDBTestCase):
+
+ def test_filter_num_iops_passes(self):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.IoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_filter_num_iops_fails(self):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.IoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 8})
+ filter_properties = {}
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_filter_num_iops_value(self, agg_mock):
+ self.flags(max_io_ops_per_host=7)
+ self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set([])
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_io_ops_per_host')
+ agg_mock.return_value = set(['8'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_filter_num_iops_value_error(self, agg_mock):
+ self.flags(max_io_ops_per_host=8)
+ self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ agg_mock.return_value = set(['XXX'])
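+        # An unparsable aggregate value should fall back to the
+        # max_io_ops_per_host flag (8), so 7 in-flight ops still pass.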
+ filter_properties = {'context': mock.sentinel.ctx}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_io_ops_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
new file mode 100644
index 0000000000..343c86264c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
@@ -0,0 +1,90 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import isolated_hosts_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestIsolatedHostsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestIsolatedHostsFilter, self).setUp()
+ self.filt_cls = isolated_hosts_filter.IsolatedHostsFilter()
+
+ def _do_test_isolated_hosts(self, host_in_list, image_in_list,
+ set_flags=True,
+ restrict_isolated_hosts_to_isolated_images=True):
+ if set_flags:
+ self.flags(isolated_images=['isolated_image'],
+ isolated_hosts=['isolated_host'],
+ restrict_isolated_hosts_to_isolated_images=
+ restrict_isolated_hosts_to_isolated_images)
+ host_name = 'isolated_host' if host_in_list else 'free_host'
+ image_ref = 'isolated_image' if image_in_list else 'free_image'
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': {'image_ref': image_ref}
+ }
+ }
+ host = fakes.FakeHostState(host_name, 'node', {})
+ return self.filt_cls.host_passes(host, filter_properties)
+
+ def test_isolated_hosts_fails_isolated_on_non_isolated(self):
+ self.assertFalse(self._do_test_isolated_hosts(False, True))
+
+ def test_isolated_hosts_fails_non_isolated_on_isolated(self):
+ self.assertFalse(self._do_test_isolated_hosts(True, False))
+
+ def test_isolated_hosts_passes_isolated_on_isolated(self):
+ self.assertTrue(self._do_test_isolated_hosts(True, True))
+
+ def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
+ self.assertTrue(self._do_test_isolated_hosts(False, False))
+
+ def test_isolated_hosts_no_config(self):
+        # If neither isolated hosts nor isolated images are set in the
+        # config, the filter should not restrict anything. This is the
+        # default config.
+ self.assertTrue(self._do_test_isolated_hosts(False, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, False, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_no_hosts_config(self):
+ self.flags(isolated_images=['isolated_image'])
+ # If there are no hosts in the config, it should only filter out
+ # images that are listed
+ self.assertFalse(self._do_test_isolated_hosts(False, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(True, False, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_no_images_config(self):
+ self.flags(isolated_hosts=['isolated_host'])
+ # If there are no images in the config, it should only filter out
+ # isolated_hosts
+ self.assertTrue(self._do_test_isolated_hosts(False, True, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, False, False))
+ self.assertFalse(self._do_test_isolated_hosts(True, True, False))
+ self.assertTrue(self._do_test_isolated_hosts(False, False, False))
+
+ def test_isolated_hosts_less_restrictive(self):
+        # If there are isolated hosts and non-isolated images
+ self.assertTrue(self._do_test_isolated_hosts(True, False, True, False))
+ # If there are isolated hosts and isolated images
+ self.assertTrue(self._do_test_isolated_hosts(True, True, True, False))
+        # If there are non-isolated hosts and non-isolated images
+ self.assertTrue(self._do_test_isolated_hosts(False, False, True,
+ False))
+        # If there are non-isolated hosts and isolated images
+ self.assertFalse(self._do_test_isolated_hosts(False, True, True,
+ False))
diff --git a/nova/tests/unit/scheduler/filters/test_json_filters.py b/nova/tests/unit/scheduler/filters/test_json_filters.py
new file mode 100644
index 0000000000..c5ddca7520
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_json_filters.py
@@ -0,0 +1,289 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.scheduler.filters import json_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestJsonFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestJsonFilter, self).setUp()
+ self.filt_cls = json_filter.JsonFilter()
+ self.json_query = jsonutils.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024]])
+
+ def test_json_filter_passes(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_passes_with_no_query(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 0,
+ 'free_disk_mb': 0})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_memory(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disk(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': (200 * 1024) - 1})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_service_disabled(self):
+ json_query = jsonutils.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024],
+ ['not', '$service.disabled']])
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'scheduler_hints': {'query': json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_happy_day(self):
+ # Test json filter more thoroughly.
+ raw = ['and',
+ '$capabilities.enabled',
+ ['=', '$capabilities.opt1', 'match'],
+ ['or',
+ ['and',
+ ['<', '$free_ram_mb', 30],
+ ['<', '$free_disk_mb', 300]],
+ ['and',
+ ['>', '$free_ram_mb', 30],
+ ['>', '$free_disk_mb', 300]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 10,
+ 'free_disk_mb': 200,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities being disabled
+ capabilities = {'enabled': False, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+        # Fails because free RAM/disk sit exactly on the 30/300 boundary,
+        # which neither branch of the 'or' accepts
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 30,
+ 'free_disk_mb': 300,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to memory lower but disk higher
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities 'opt1' not equal
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ service = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_basic_operators(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ # (operator, arguments, expected_result)
+ ops_to_test = [
+ ['=', [1, 1], True],
+ ['=', [1, 2], False],
+ ['<', [1, 2], True],
+ ['<', [1, 1], False],
+ ['<', [2, 1], False],
+ ['>', [2, 1], True],
+ ['>', [2, 2], False],
+ ['>', [2, 3], False],
+ ['<=', [1, 2], True],
+ ['<=', [1, 1], True],
+ ['<=', [2, 1], False],
+ ['>=', [2, 1], True],
+ ['>=', [2, 2], True],
+ ['>=', [2, 3], False],
+ ['in', [1, 1], True],
+ ['in', [1, 1, 2, 3], True],
+ ['in', [4, 1, 2, 3], False],
+ ['not', [True], False],
+ ['not', [False], True],
+ ['or', [True, False], True],
+ ['or', [False, False], False],
+ ['and', [True, True], True],
+ ['and', [False, False], False],
+ ['and', [True, False], False],
+ # Nested ((True or False) and (2 > 1)) == Passes
+ ['and', [['or', True, False], ['>', 2, 1]], True]]
+
+ for (op, args, expected) in ops_to_test:
+ raw = [op] + args
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertEqual(expected,
+ self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, True, False, True] and if any are True
+ # then it passes...
+ raw = ['not', True, False, True, False]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, False, False] and if any are True
+ # then it passes...which this doesn't
+ raw = ['not', True, True, True]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_operator_raises(self):
+ raw = ['!=', 1, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+ self.assertRaises(KeyError,
+ self.filt_cls.host_passes, host, filter_properties)
+
+ def test_json_filter_empty_filters_pass(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = []
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ raw = {}
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_invalid_num_arguments_fails(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
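+ # Comparison operators given fewer than two arguments evaluate to False
+ # instead of raising, so both malformed queries below reject the host.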
+ raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['>', 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_variable_ignored(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
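+ # An unknown variable resolves to nothing and is ignored, so the
+ # remaining '=' comparison over the literal arguments decides the result.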
+ raw = ['=', '$........', 1, 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['=', '$foo', 2, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_metrics_filters.py b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
new file mode 100644
index 0000000000..9ae0f6c77c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import metrics_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestMetricsFilter(test.NoDBTestCase):
+
+ def test_metrics_filter_pass(self):
+ self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
+ filt_cls = metrics_filter.MetricsFilter()
+ metrics = dict(foo=1, bar=2)
+ host = fakes.FakeHostState('host1', 'node1',
+ attribute_dict={'metrics': metrics})
+ self.assertTrue(filt_cls.host_passes(host, None))
+
+ def test_metrics_filter_missing_metrics(self):
+ self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
+ filt_cls = metrics_filter.MetricsFilter()
+ metrics = dict(foo=1)
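+ # 'bar' is configured in weight_setting but missing from the host
+ # metrics, so the filter rejects the host.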
+ host = fakes.FakeHostState('host1', 'node1',
+ attribute_dict={'metrics': metrics})
+ self.assertFalse(filt_cls.host_passes(host, None))
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
new file mode 100644
index 0000000000..3db0eeb6e7
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import num_instances_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestNumInstancesFilter(test.NoDBTestCase):
+
+ def test_filter_num_instances_passes(self):
+ self.flags(max_instances_per_host=5)
+ self.filt_cls = num_instances_filter.NumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 4})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_filter_num_instances_fails(self):
+ self.flags(max_instances_per_host=5)
+ self.filt_cls = num_instances_filter.NumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
+ filter_properties = {}
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_filter_aggregate_num_instances_value(self, agg_mock):
+ self.flags(max_instances_per_host=4)
+ self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_instances': 5})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set([])
+ # No aggregate value set: falls back to the max_instances_per_host
+ # flag (4), so a host with 5 instances fails.
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_instances_per_host')
+ agg_mock.return_value = set(['6'])
+ # Aggregate value of 6 overrides the flag, so 5 instances now pass.
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_filter_aggregate_num_instances_value_error(self, agg_mock):
+ self.flags(max_instances_per_host=6)
+ self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'context': mock.sentinel.ctx}
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
+ 'max_instances_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
new file mode 100644
index 0000000000..3c8eb049c8
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -0,0 +1,151 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import objects
+from nova.objects import base as obj_base
+from nova.scheduler.filters import numa_topology_filter
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.scheduler import fakes
+from nova.virt import hardware
+
+
+class TestNUMATopologyFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNUMATopologyFilter, self).setUp()
+ self.filt_cls = numa_topology_filter.NUMATopologyFilter()
+
+ def test_numa_topology_filter_pass(self):
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = None
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_fit(self):
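+ # The instance asks for three NUMA cells, which cannot be fitted onto
+ # the fake host topology, so the host must be rejected.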
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([2]), 512),
+ hardware.VirtNUMATopologyCellInstance(2, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_memory(self):
+ self.flags(ram_allocation_ratio=1)
+
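+ # With ram_allocation_ratio=1 the 1024 MB request for cell 0 exceeds
+ # what a single cell of the fake host topology can provide.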
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 1024),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_fail_cpu(self):
+ self.flags(cpu_allocation_ratio=1)
+
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([3, 4, 5]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_numa_topology_filter_pass_set_limit(self):
+ self.flags(cpu_allocation_ratio=21)
+ self.flags(ram_allocation_ratio=1.3)
+
+ instance_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ instance.numa_topology = (
+ objects.InstanceNUMATopology.obj_from_topology(
+ instance_topology))
+ filter_properties = {
+ 'request_spec': {
+ 'instance_properties': jsonutils.to_primitive(
+ obj_base.obj_to_primitive(instance))}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'numa_topology': fakes.NUMA_TOPOLOGY})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ limits_topology = hardware.VirtNUMALimitTopology.from_json(
+ host.limits['numa_topology'])
+ self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
+ self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
+ self.assertEqual(limits_topology.cells[0].memory_limit, 665)
+ self.assertEqual(limits_topology.cells[1].memory_limit, 665)
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
new file mode 100644
index 0000000000..57dd5ebc02
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import objects
+from nova.scheduler.filters import pci_passthrough_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestPCIPassthroughFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestPCIPassthroughFilter, self).setUp()
+ self.filt_cls = pci_passthrough_filter.PciPassthroughFilter()
+
+ def test_pci_passthrough_pass(self):
+ pci_stats_mock = mock.MagicMock()
+ pci_stats_mock.support_requests.return_value = True
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086'}])
+ requests = objects.InstancePCIRequests(requests=[request])
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={'pci_stats': pci_stats_mock})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests)
+
+ def test_pci_passthrough_fail(self):
+ pci_stats_mock = mock.MagicMock()
+ pci_stats_mock.support_requests.return_value = False
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086'}])
+ requests = objects.InstancePCIRequests(requests=[request])
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={'pci_stats': pci_stats_mock})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests)
+
+ def test_pci_passthrough_no_pci_request(self):
+ filter_properties = {}
+ host = fakes.FakeHostState('h1', 'n1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_pci_passthrough_compute_stats(self):
+ requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
+ filter_properties = {'pci_requests': requests}
+ host = fakes.FakeHostState(
+ 'host1', 'node1',
+ attribute_dict={})
+ self.assertRaises(AttributeError, self.filt_cls.host_passes,
+ host, filter_properties)
diff --git a/nova/tests/unit/scheduler/filters/test_ram_filters.py b/nova/tests/unit/scheduler/filters/test_ram_filters.py
new file mode 100644
index 0000000000..c7a6df58c9
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_ram_filters.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import ram_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestRamFilter, self).setUp()
+ self.filt_cls = ram_filter.RamFilter()
+
+ def test_ram_filter_fails_on_memory(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_passes(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 1.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_ram_filter_oversubscribe(self):
+ ram_filter.RamFilter.ram_allocation_ratio = 2.0
+ filter_properties = {'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
+
+
+@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+class TestAggregateRamFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateRamFilter, self).setUp()
+ self.filt_cls = ram_filter.AggregateRamFilter()
+
+ def test_aggregate_ram_filter_value_error(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['XXX'])
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_default_value(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ # False: fallback to default flag w/o aggregates
+ agg_mock.return_value = set()
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.return_value = set(['2.0'])
+ # True: use ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
+
+ def test_aggregate_ram_filter_conflict_values(self, agg_mock):
+ self.flags(ram_allocation_ratio=1.0)
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+ agg_mock.return_value = set(['1.5', '2.0'])
+ # use the minimum ratio from aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
diff --git a/nova/tests/unit/scheduler/filters/test_retry_filters.py b/nova/tests/unit/scheduler/filters/test_retry_filters.py
new file mode 100644
index 0000000000..04510cd419
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_retry_filters.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import retry_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestRetryFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestRetryFilter, self).setUp()
+ self.filt_cls = retry_filter.RetryFilter()
+
+ def test_retry_filter_disabled(self):
+ # Test case where retry/re-scheduling is disabled.
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_retry_filter_pass(self):
+ # Node not previously tried.
+ host = fakes.FakeHostState('host1', 'nodeX', {})
+ retry = dict(num_attempts=2,
+ hosts=[['host1', 'node1'], # same host, different node
+ ['host2', 'node2'], # different host and node
+ ])
+ filter_properties = dict(retry=retry)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_retry_filter_fail(self):
+ # Node was already tried.
+ host = fakes.FakeHostState('host1', 'node1', {})
+ retry = dict(num_attempts=1,
+ hosts=[['host1', 'node1']])
+ filter_properties = dict(retry=retry)
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_trusted_filters.py b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
new file mode 100644
index 0000000000..b6afb92ae0
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
@@ -0,0 +1,203 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+import requests
+
+from nova.scheduler.filters import trusted_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+@mock.patch.object(trusted_filter.AttestationService, '_request')
+class TestTrustedFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestTrustedFilter, self).setUp()
+ # TrustedFilter's constructor creates the attestation cache, which
+ # queries the database for the list of all compute nodes.
+ fake_compute_nodes = [
+ {'hypervisor_hostname': 'node1',
+ 'service': {'host': 'host1'},
+ }
+ ]
+ with mock.patch('nova.db.compute_node_get_all') as mocked:
+ mocked.return_value = fake_compute_nodes
+ self.filt_cls = trusted_filter.TrustedFilter()
+
+ def test_trusted_filter_default_passes(self, req_mock):
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(req_mock.called)
+
+ def test_trusted_filter_trusted_and_trusted_passes(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ req_mock.assert_called_once_with("POST", "PollHosts", ["node1"])
+
+ def test_trusted_filter_trusted_and_untrusted_fails(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_untrusted_and_trusted_fails(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_trusted_filter_update_cache(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ req_mock.reset_mock()
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ req_mock.reset_mock()
+
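+ # Advance the clock past the attestation cache timeout so the next
+ # check has to query the attestation service again.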
+ timeutils.set_time_override(timeutils.utcnow())
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout + 80)
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertTrue(req_mock.called)
+
+ timeutils.clear_time_override()
+
+ def test_trusted_filter_update_cache_timezone(self, req_mock):
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
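+ # 2012-09-09T09:10:40Z is the same instant as the vtime above
+ # (05:10:40-04:00), so the cached attestation is still fresh.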
+ timeutils.set_time_override(
+ timeutils.normalize_time(
+ timeutils.parse_isotime("2012-09-09T09:10:40Z")))
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ req_mock.reset_mock()
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ req_mock.reset_mock()
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout - 10)
+ self.filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(req_mock.called)
+
+ timeutils.clear_time_override()
+
+ def test_trusted_filter_combine_hosts(self, req_mock):
+ fake_compute_nodes = [
+ {'hypervisor_hostname': 'node1',
+ 'service': {'host': 'host1'},
+ },
+ {'hypervisor_hostname': 'node2',
+ 'service': {'host': 'host2'},
+ },
+ ]
+ with mock.patch('nova.db.compute_node_get_all') as mocked:
+ mocked.return_value = fake_compute_nodes
+ self.filt_cls = trusted_filter.TrustedFilter()
+ oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ self.filt_cls.host_passes(host, filter_properties) # Fill the caches
+ req_mock.assert_called_once_with("POST", "PollHosts",
+ ["node1", "node2"])
+
+ def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self,
+ req_mock):
+ oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%c")},
+ {"host_name": "host2",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%D")},
+ # This is just a broken date to ensure that
+ # we're not just arbitrarily accepting any
+ # date format.
+ ]}
+ req_mock.return_value = requests.codes.OK, oat_data
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'host1', {})
+ bad_host = fakes.FakeHostState('host2', 'host2', {})
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(self.filt_cls.host_passes(bad_host,
+ filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
new file mode 100644
index 0000000000..3aebba1a76
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import type_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestTypeFilter(test.NoDBTestCase):
+
+ @mock.patch('nova.db.instance_get_all_by_host_and_not_type')
+ def test_type_filter(self, get_mock):
+ self.filt_cls = type_filter.TypeAffinityFilter()
+
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ filter_properties = {'context': mock.MagicMock(),
+ 'instance_type': {'id': 'fake1'}}
+ get_mock.return_value = []
+ # True since empty
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_mock.assert_called_once_with(
+ mock.ANY, # context...
+ 'fake_host',
+ 'fake1'
+ )
+ get_mock.return_value = [mock.sentinel.instances]
+ # False since not empty
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_type_filter(self, agg_mock):
+ self.filt_cls = type_filter.AggregateTypeAffinityFilter()
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake1'}}
+ filter2_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake2'}}
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ agg_mock.return_value = set(['fake1'])
+ # True: the requested type 'fake1' matches the aggregate metadata value
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'fake_host',
+ 'instance_type')
+ # False: 'fake2' does not match the aggregate metadata value 'fake1'
+ self.assertFalse(self.filt_cls.host_passes(host, filter2_properties))
diff --git a/nova/tests/unit/scheduler/ironic_fakes.py b/nova/tests/unit/scheduler/ironic_fakes.py
new file mode 100644
index 0000000000..5c63afafe3
--- /dev/null
+++ b/nova/tests/unit/scheduler/ironic_fakes.py
@@ -0,0 +1,75 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake nodes for Ironic host manager tests.
+"""
+
+from oslo.serialization import jsonutils
+
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=10, free_ram_mb=1024),
+ dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=20, free_ram_mb=2048),
+ dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=30, free_ram_mb=3072),
+ dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=40, free_ram_mb=4096),
+ # Broken entry
+ dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
+ cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=50, free_ram_mb=5120),
+]
diff --git a/nova/tests/unit/scheduler/test_baremetal_host_manager.py b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
new file mode 100644
index 0000000000..1f6e2d70fa
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For BaremetalHostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.scheduler import baremetal_host_manager
+from nova.scheduler import host_manager
+from nova import test
+
+
+class BaremetalHostManagerTestCase(test.NoDBTestCase):
+ """Test case for BaremetalHostManager class."""
+
+ def setUp(self):
+ super(BaremetalHostManagerTestCase, self).setUp()
+ self.host_manager = baremetal_host_manager.BaremetalHostManager()
+
+ def test_manager_public_api_signatures(self):
+ self.assertPublicAPISignatures(host_manager.HostManager(),
+ self.host_manager)
+
+ def test_state_public_api_signatures(self):
+ self.assertPublicAPISignatures(
+ host_manager.HostState("dummy",
+ "dummy"),
+ baremetal_host_manager.BaremetalNodeState("dummy",
+ "dummy")
+ )
+
+ @mock.patch.object(baremetal_host_manager.BaremetalNodeState, '__init__')
+ def test_create_baremetal_node_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'baremetal cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(baremetal_host_manager.BaremetalNodeState,
+ type(host_state))
+
+ @mock.patch.object(host_manager.HostState, '__init__')
+ def test_create_non_baremetal_host_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'other cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(host_manager.HostState, type(host_state))
+
+
+class BaremetalNodeStateTestCase(test.NoDBTestCase):
+ """Test case for BaremetalNodeState class."""
+
+ def test_update_from_compute_node(self):
+ stats = {'cpu_arch': 'cpu_arch'}
+ json_stats = jsonutils.dumps(stats)
+ compute_node = {'memory_mb': 1024, 'free_disk_gb': 10,
+ 'free_ram_mb': 1024, 'vcpus': 1, 'vcpus_used': 0,
+ 'stats': json_stats}
+
+ host = baremetal_host_manager.BaremetalNodeState('fakehost',
+ 'fakenode')
+ host.update_from_compute_node(compute_node)
+
+ self.assertEqual(compute_node['free_ram_mb'], host.free_ram_mb)
+ self.assertEqual(compute_node['memory_mb'], host.total_usable_ram_mb)
+ self.assertEqual(compute_node['free_disk_gb'] * 1024,
+ host.free_disk_mb)
+ self.assertEqual(compute_node['vcpus'], host.vcpus_total)
+ self.assertEqual(compute_node['vcpus_used'], host.vcpus_used)
+ self.assertEqual(stats, host.stats)
diff --git a/nova/tests/unit/scheduler/test_caching_scheduler.py b/nova/tests/unit/scheduler/test_caching_scheduler.py
new file mode 100644
index 0000000000..15525f1b20
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_caching_scheduler.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.scheduler import caching_scheduler
+from nova.scheduler import host_manager
+from nova.tests.unit.scheduler import test_scheduler
+
+ENABLE_PROFILER = False
+
+
+class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Caching Scheduler."""
+
+ driver_cls = caching_scheduler.CachingScheduler
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = []
+ context = mock.Mock()
+
+ self.driver.run_periodic_tasks(context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+ context.elevated.assert_called_with()
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
+ self.driver.all_host_states = []
+
+ self.driver._get_all_host_states(self.context)
+
+ self.assertFalse(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_all_host_states(self.context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual(["asdf"], self.driver.all_host_states)
+ self.assertEqual(["asdf"], result)
+
+ def test_get_up_hosts(self):
+ with mock.patch.object(self.driver.host_manager,
+ "get_all_host_states") as mock_get_hosts:
+ mock_get_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_up_hosts(self.context)
+
+ self.assertTrue(mock_get_hosts.called)
+ self.assertEqual(mock_get_hosts.return_value, result)
+
+ def test_select_destination_raises_with_no_hosts(self):
+ fake_request_spec = self._get_fake_request_spec()
+ self.driver.all_host_states = []
+
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations,
+ self.context, fake_request_spec, {})
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destination_works(self, mock_get_extra):
+ fake_request_spec = self._get_fake_request_spec()
+ fake_host = self._get_fake_host_state()
+ self.driver.all_host_states = [fake_host]
+
+ result = self._test_select_destinations(fake_request_spec)
+
+ self.assertEqual(1, len(result))
+ self.assertEqual(result[0]["host"], fake_host.host)
+
+ def _test_select_destinations(self, request_spec):
+ return self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ def _get_fake_request_spec(self):
+ flavor = {
+ "flavorid": "small",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ }
+ instance_properties = {
+ "os_type": "linux",
+ "project_id": "1234",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ "uuid": 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ }
+ request_spec = {
+ "instance_type": flavor,
+ "instance_properties": instance_properties,
+ "num_instances": 1,
+ }
+ return request_spec
+
+ def _get_fake_host_state(self, index=0):
+ host_state = host_manager.HostState(
+ 'host_%s' % index,
+ 'node_%s' % index)
+ host_state.free_ram_mb = 50000
+ host_state.service = {
+ "disabled": False,
+ "updated_at": timeutils.utcnow(),
+ "created_at": timeutils.utcnow(),
+ }
+ return host_state
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_performance_check_select_destination(self, mock_get_extra):
+ hosts = 2
+ requests = 1
+
+ self.flags(service_down_time=240)
+
+ request_spec = self._get_fake_request_spec()
+ host_states = []
+ for x in xrange(hosts):
+ host_state = self._get_fake_host_state(x)
+ host_states.append(host_state)
+ self.driver.all_host_states = host_states
+
+ def run_test():
+ a = timeutils.utcnow()
+
+ for x in xrange(requests):
+ self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ b = timeutils.utcnow()
+ c = b - a
+
+ seconds = (c.days * 24 * 60 * 60 + c.seconds)
+ milliseconds = seconds * 1000 + c.microseconds / 1000.0
+ per_request_ms = milliseconds / requests
+ return per_request_ms
+
+ per_request_ms = None
+ if ENABLE_PROFILER:
+ import pycallgraph
+ from pycallgraph import output
+ config = pycallgraph.Config(max_depth=10)
+ config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
+ 'pycallgraph.*',
+ 'unittest.*',
+ 'nova.tests.unit.*',
+ ])
+ graphviz = output.GraphvizOutput(output_file='scheduler.png')
+
+ with pycallgraph.PyCallGraph(output=graphviz):
+ per_request_ms = run_test()
+
+ else:
+ per_request_ms = run_test()
+
+ # This has proved to be around 1 ms on a random dev box
+ # But it is kept here so you can easily do simple performance testing.
+ self.assertTrue(per_request_ms < 1000)
+
+
+if __name__ == '__main__':
+ # A handy tool to help profile the scheduler's performance
+ ENABLE_PROFILER = True
+ import unittest
+ suite = unittest.TestSuite()
+ test = "test_performance_check_select_destination"
+ test_case = CachingSchedulerTestCase(test)
+ suite.addTest(test_case)
+ runner = unittest.TextTestRunner()
+ runner.run(suite)
diff --git a/nova/tests/unit/scheduler/test_chance_scheduler.py b/nova/tests/unit/scheduler/test_chance_scheduler.py
new file mode 100644
index 0000000000..73a4696ec3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_chance_scheduler.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Chance Scheduler.
+"""
+
+import random
+
+import mox
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import chance
+from nova.scheduler import driver
+from nova.tests.unit.scheduler import test_scheduler
+
+
+class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Chance Scheduler."""
+
+ driver_cls = chance.ChanceScheduler
+
+ def test_filter_hosts_avoid(self):
+ """Test that _filter_hosts() removes hosts listed in
+ ignore_hosts from the candidate list.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': ['host2']}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, ['host1', 'host3'])
+
+ def test_filter_hosts_no_avoid(self):
+ """Test that _filter_hosts() keeps all hosts when ignore_hosts
+ is empty.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': []}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, hosts)
+
+ def test_basic_schedule_run_instance(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+
+ def inc_launch_index(*args):
+ request_spec['instance_properties']['launch_index'] = (
+ request_spec['instance_properties']['launch_index'] + 1)
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ # instance 1
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+ driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance1)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
+ instance=instance1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ # instance 2
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host1')
+ driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance2)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
+ instance=instance2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(ctxt, request_spec,
+ None, None, None, None, {}, False)
+
+ def test_basic_schedule_run_instance_no_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ uuid = 'fake-uuid1'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ request_spec = {'instance_uuids': [uuid],
+ 'instance_properties': instance_opts}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ # instance 1
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
+ old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
+ {'vm_state': vm_states.ERROR,
+ 'task_state': None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(ctxt, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(
+ ctxt, request_spec, None, None, None, None, {}, False)
+
+ def test_select_destinations(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ request_spec = {'num_instances': 2}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host2')
+
+ self.mox.ReplayAll()
+ dests = self.driver.select_destinations(ctxt, request_spec, {})
+ self.assertEqual(2, len(dests))
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual('host3', host)
+ self.assertIsNone(node)
+ (host, node) = (dests[1]['host'], dests[1]['nodename'])
+ self.assertEqual('host2', host)
+ self.assertIsNone(node)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.driver.hosts_up(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn([1, 2])
+ self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
+ self.mox.ReplayAll()
+
+ request_spec = {'num_instances': 1}
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ request_spec, {})
diff --git a/nova/tests/unit/scheduler/test_client.py b/nova/tests/unit/scheduler/test_client.py
new file mode 100644
index 0000000000..5ea915c4f6
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_client.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for Scheduler Client."""
+
+import mock
+
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import exception
+from nova.scheduler import client as scheduler_client
+from nova.scheduler.client import query as scheduler_query_client
+from nova.scheduler.client import report as scheduler_report_client
+from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import test
+
+
+class SchedulerReportClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerReportClientTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ self.flags(use_local=True, group='conductor')
+
+ self.client = scheduler_report_client.SchedulerReportClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.conductor_api)
+
+ @mock.patch.object(conductor_api.LocalAPI, 'compute_node_update')
+ def test_update_compute_node_works(self, mock_cn_update):
+ stats = {"id": 1, "foo": "bar"}
+ self.client.update_resource_stats(self.context,
+ ('fakehost', 'fakenode'),
+ stats)
+ mock_cn_update.assert_called_once_with(self.context,
+ {"id": 1},
+ {"foo": "bar"})
+
+ def test_update_compute_node_raises(self):
+ stats = {"foo": "bar"}
+ self.assertRaises(exception.ComputeHostNotCreated,
+ self.client.update_resource_stats,
+ self.context, ('fakehost', 'fakenode'), stats)
+
+
+class SchedulerQueryClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerQueryClientTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ self.client = scheduler_query_client.SchedulerQueryClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.scheduler_rpcapi)
+
+ @mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'select_destinations')
+ def test_select_destinations(self, mock_select_destinations):
+ self.client.select_destinations(
+ context=self.context,
+ request_spec='fake_request_spec',
+ filter_properties='fake_prop'
+ )
+ mock_select_destinations.assert_called_once_with(
+ self.context,
+ 'fake_request_spec',
+ 'fake_prop')
+
+
+class SchedulerClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(SchedulerClientTestCase, self).setUp()
+ self.client = scheduler_client.SchedulerClient()
+
+ def test_constructor(self):
+ self.assertIsNotNone(self.client.queryclient)
+ self.assertIsNotNone(self.client.reportclient)
+
+ @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
+ 'select_destinations')
+ def test_select_destinations(self, mock_select_destinations):
+ self.assertIsNone(self.client.queryclient.instance)
+
+ self.client.select_destinations('ctxt', 'fake_spec', 'fake_prop')
+
+ self.assertIsNotNone(self.client.queryclient.instance)
+ mock_select_destinations.assert_called_once_with(
+ 'ctxt', 'fake_spec', 'fake_prop')
+
+ @mock.patch.object(scheduler_report_client.SchedulerReportClient,
+ 'update_resource_stats')
+ def test_update_resource_stats(self, mock_update_resource_stats):
+ self.assertIsNone(self.client.reportclient.instance)
+
+ self.client.update_resource_stats('ctxt', 'fake_name', 'fake_stats')
+
+ self.assertIsNotNone(self.client.reportclient.instance)
+ mock_update_resource_stats.assert_called_once_with(
+ 'ctxt', 'fake_name', 'fake_stats')
diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py
new file mode 100644
index 0000000000..96231ef13a
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filter_scheduler.py
@@ -0,0 +1,596 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Filter Scheduler.
+"""
+
+import mock
+import mox
+
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import driver
+from nova.scheduler import filter_scheduler
+from nova.scheduler import host_manager
+from nova.scheduler import utils as scheduler_utils
+from nova.scheduler import weights
+from nova.tests.unit.scheduler import fakes
+from nova.tests.unit.scheduler import test_scheduler
+
+
+def fake_get_filtered_hosts(hosts, filter_properties, index):
+ return list(hosts)
+
+
+class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Filter Scheduler."""
+
+ driver_cls = filter_scheduler.FilterScheduler
+
+ def test_run_instance_no_hosts(self):
+ sched = fakes.FakeFilterScheduler()
+ uuid = 'fake-uuid1'
+ fake_context = context.RequestContext('user', 'project')
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
+ 'ephemeral_gb': 0},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None,
+ None, None, {}, False)
+
+ def test_run_instance_non_admin(self):
+ self.was_admin = False
+
+ def fake_get(context, *args, **kwargs):
+ # make sure this is called with admin context, even though
+ # we're using user context below
+ self.was_admin = context.is_admin
+ return {}
+
+ sched = fakes.FakeFilterScheduler()
+ self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
+
+ fake_context = context.RequestContext('user', 'project')
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None, None, None, {}, False)
+ self.assertTrue(self.was_admin)
+
+ def test_scheduler_includes_launch_index(self):
+ fake_context = context.RequestContext('user', 'project')
+ instance_opts = {'fake_opt1': 'meow'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+
+ def _has_launch_index(expected_index):
+ """Return a function that verifies the expected index."""
+ def _check_launch_index(value):
+ if 'instance_properties' in value:
+ if 'launch_index' in value['instance_properties']:
+ index = value['instance_properties']['launch_index']
+ if index == expected_index:
+ return True
+ return False
+ return _check_launch_index
+
+ self.mox.StubOutWithMock(self.driver, '_schedule')
+ self.mox.StubOutWithMock(self.driver, '_provision_resource')
+
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.driver._schedule(fake_context, request_spec,
+ expected_filter_properties).AndReturn(['host1', 'host2'])
+ # instance 1
+ self.driver._provision_resource(
+ fake_context, 'host1',
+ mox.Func(_has_launch_index(0)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False).AndReturn(instance1)
+ # instance 2
+ self.driver._provision_resource(
+ fake_context, 'host2',
+ mox.Func(_has_launch_index(1)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid2',
+ legacy_bdm_in_spec=False).AndReturn(instance2)
+ self.mox.ReplayAll()
+
+ self.driver.schedule_run_instance(fake_context, request_spec,
+ None, None, None, None, {}, False)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_happy_day(self, mock_get_extra):
+ """Make sure there's nothing glaringly wrong with _schedule()
+ by doing a happy day pass through.
+ """
+
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}}
+ self.mox.ReplayAll()
+ weighed_hosts = sched._schedule(fake_context, request_spec, {})
+ self.assertEqual(len(weighed_hosts), 10)
+ for weighed_host in weighed_hosts:
+ self.assertIsNotNone(weighed_host.obj)
+
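+    # The next two tests pin down scheduler_utils._max_attempts. A minimal
+    # sketch of the assumed behaviour (not necessarily the exact code):
+    #
+    #     def _max_attempts():
+    #         max_attempts = CONF.scheduler_max_attempts
+    #         if max_attempts < 1:
+    #             raise exception.NovaException("Invalid value for "
+    #                                           "'scheduler_max_attempts'")
+    #         return max_attempts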
+ def test_max_attempts(self):
+ self.flags(scheduler_max_attempts=4)
+ self.assertEqual(4, scheduler_utils._max_attempts())
+
+ def test_invalid_max_attempts(self):
+ self.flags(scheduler_max_attempts=0)
+ self.assertRaises(exception.NovaException,
+ scheduler_utils._max_attempts)
+
+ def test_retry_disabled(self):
+ # Retry info should not get populated when re-scheduling is off.
+ self.flags(scheduler_max_attempts=1)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_hosts(self):
+        # Retry info should not get populated when force_hosts is set.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_hosts': ['force_host']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_nodes(self):
+        # Retry info should not get populated when force_nodes is set.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_nodes': ['force_node']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
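+    # The retry bookkeeping lives in filter_properties['retry'], shaped like
+    # {'num_attempts': N, 'hosts': [[host, node], ...]}: num_attempts is
+    # bumped on every scheduling pass and NoValidHost is raised once it
+    # exceeds scheduler_max_attempts, as the next three tests show.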
+ def test_retry_attempt_one(self):
+ # Test retry logic on initial scheduling attempt.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_attempt_two(self):
+ # Test retry logic when re-scheduling.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 1}}
+ expected_filter_properties = {'retry': {'num_attempts': 2}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_exceeded_max_attempts(self):
+        # Test that NoValidHost is raised when max retries is exceeded and
+        # that the information needed in request_spec is still present for
+        # error handling.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 2}}
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
+ self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_add_retry_host(self):
+ retry = dict(num_attempts=1, hosts=[])
+ filter_properties = dict(retry=retry)
+ host = "fakehost"
+ node = "fakenode"
+
+ scheduler_utils._add_retry_host(filter_properties, host, node)
+
+ hosts = filter_properties['retry']['hosts']
+ self.assertEqual(1, len(hosts))
+ self.assertEqual([host, node], hosts[0])
+
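+    # populate_filter_properties() is expected to append the chosen [host,
+    # node] pair to retry['hosts'] once a node is selected; the next test
+    # also checks that the host state's limits are left intact.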
+ def test_post_select_populate(self):
+ # Test addition of certain filter props after a node is selected.
+ retry = {'hosts': [], 'num_attempts': 1}
+ filter_properties = {'retry': retry}
+
+ host_state = host_manager.HostState('host', 'node')
+ host_state.limits['vcpus'] = 5
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
+ self.assertEqual(['host', 'node'],
+ filter_properties['retry']['hosts'][0])
+
+ self.assertEqual({'vcpus': 5}, host_state.limits)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_host_pool(self, mock_get_extra):
+        """Make sure the scheduler_host_subset_size option works properly."""
+
+ self.flags(scheduler_host_subset_size=2)
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_large_host_pool(self, mock_get_extra):
+ """Hosts should still be chosen if pool size
+ is larger than number of filtered hosts.
+ """
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.flags(scheduler_host_subset_size=20)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+        # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_chooses_best_host(self, mock_get_extra):
+        """If scheduler_host_subset_size is 1, the single host with the
+        greatest weight should be returned.
+        """
+
+ self.flags(scheduler_host_subset_size=1)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.next_weight = 50
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ this_weight = self.next_weight
+ self.next_weight = 0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, this_weight)]
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(1, len(hosts))
+
+ self.assertEqual(50, hosts[0].weight)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destinations(self, mock_get_extra):
+ """select_destinations is basically a wrapper around _schedule().
+
+ Similar to the _schedule tests, this just does a happy path test to
+ ensure there is nothing glaringly wrong.
+ """
+
+ self.next_weight = 1.0
+
+ selected_hosts = []
+ selected_nodes = []
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ selected_hosts.append(host_state.host)
+ selected_nodes.append(host_state.nodename)
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'},
+ 'num_instances': 1}
+ self.mox.ReplayAll()
+ dests = sched.select_destinations(fake_context, request_spec, {})
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual(host, selected_hosts[0])
+ self.assertEqual(node, selected_nodes[0])
+
+ @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
+ def test_select_destinations_notifications(self, mock_schedule):
+ mock_schedule.return_value = [mock.Mock()]
+
+ with mock.patch.object(self.driver.notifier, 'info') as mock_info:
+ request_spec = {'num_instances': 1}
+
+ self.driver.select_destinations(self.context, request_spec, {})
+
+ expected = [
+ mock.call(self.context, 'scheduler.select_destinations.start',
+ dict(request_spec=request_spec)),
+ mock.call(self.context, 'scheduler.select_destinations.end',
+ dict(request_spec=request_spec))]
+ self.assertEqual(expected, mock_info.call_args_list)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.stubs.Set(self.driver, '_schedule', _return_no_host)
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ {'num_instances': 1}, {})
+
+ def test_select_destinations_no_valid_host_not_enough(self):
+        # Tests the case where fewer hosts are available than the number of
+        # instances requested to build.
+ with mock.patch.object(self.driver, '_schedule',
+ return_value=[mock.sentinel.host1]):
+ try:
+ self.driver.select_destinations(
+ self.context, {'num_instances': 2}, {})
+ self.fail('Expected NoValidHost to be raised.')
+ except exception.NoValidHost as e:
+                # Make sure a reason was provided for the NoValidHost error.
+ self.assertIn('reason', e.kwargs)
+ self.assertTrue(len(e.kwargs['reason']) > 0)
+
+ def test_handles_deleted_instance(self):
+ """Test instance deletion while being scheduled."""
+
+ def _raise_instance_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='123')
+
+ self.stubs.Set(driver, 'instance_update_db',
+ _raise_instance_not_found)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project')
+ host_state = host_manager.HostState('host2', 'node2')
+ weighted_host = weights.WeighedHost(host_state, 1.42)
+ filter_properties = {}
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ sched._provision_resource(fake_context, weighted_host,
+ request_spec, filter_properties,
+ None, None, None, None)
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
new file mode 100644
index 0000000000..6469829078
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -0,0 +1,206 @@
+# Copyright 2012 OpenStack Foundation. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import inspect
+import sys
+
+from nova import filters
+from nova import loadables
+from nova import test
+
+
+class Filter1(filters.BaseFilter):
+ """Test Filter class #1."""
+ pass
+
+
+class Filter2(filters.BaseFilter):
+ """Test Filter class #2."""
+ pass
+
+
+class FiltersTestCase(test.NoDBTestCase):
+ def test_filter_all(self):
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ result = base_filter.filter_all(filter_obj_list, filter_properties)
+ self.assertTrue(inspect.isgenerator(result))
+ self.assertEqual(['obj1', 'obj3'], list(result))
+
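+    # For reference, BaseFilter.filter_all is assumed to be a plain generator
+    # along these lines (a sketch, not necessarily the exact code):
+    #
+    #     def filter_all(self, filter_obj_list, filter_properties):
+    #         for obj in filter_obj_list:
+    #             if self._filter_one(obj, filter_properties):
+    #                 yield obj
+    #
+    # which is why the recursive test below can chain generators together.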
+ def test_filter_all_recursive_yields(self):
+ # Test filter_all() allows generators from previous filter_all()s.
+ # filter_all() yields results. We want to make sure that we can
+ # call filter_all() with generators returned from previous calls
+ # to filter_all().
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ total_iterations = 200
+
+        # The order in which _filter_one gets called is confusing because
+        # we will be recursively yielding things.
+ # We are going to simulate the first call to filter_all()
+ # returning False for 'obj2'. So, 'obj1' will get yielded
+ # 'total_iterations' number of times before the first filter_all()
+ # call gets to processing 'obj2'. We then return 'False' for it.
+ # After that, 'obj3' gets yielded 'total_iterations' number of
+ # times.
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+ self.mox.ReplayAll()
+
+ objs = iter(filter_obj_list)
+ for x in xrange(total_iterations):
+ # Pass in generators returned from previous calls.
+ objs = base_filter.filter_all(objs, filter_properties)
+ self.assertTrue(inspect.isgenerator(objs))
+ self.assertEqual(['obj1', 'obj3'], list(objs))
+
+ def test_get_filtered_objects(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_objs_last = ['last', 'filter3', 'objects3']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+ filt2_mock.run_filter_for_index(0).AndReturn(True)
+ filt2_mock.filter_all(filter_objs_second,
+ filter_properties).AndReturn(filter_objs_last)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertEqual(filter_objs_last, result)
+
+ def test_get_filtered_objects_for_index(self):
+ """Test that we don't call a filter when its
+        run_filter_for_index() method returns False.
+ """
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+        # Return False so filter_all will not be called.
+ filt2_mock.run_filter_for_index(0).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+
+ def test_get_filtered_objects_none_response(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ # Shouldn't be called.
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.run_filter_for_index(0).AndReturn(True)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(None)
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertIsNone(result)
diff --git a/nova/tests/unit/scheduler/test_filters_utils.py b/nova/tests/unit/scheduler/test_filters_utils.py
new file mode 100644
index 0000000000..48792fae35
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filters_utils.py
@@ -0,0 +1,44 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import utils
+from nova import test
+
+
+class UtilsTestCase(test.NoDBTestCase):
+ def test_validate_num_values(self):
+ f = utils.validate_num_values
+
+ self.assertEqual("x", f(set(), default="x"))
+ self.assertEqual(1, f(set(["1"]), cast_to=int))
+ self.assertEqual(1.0, f(set(["1"]), cast_to=float))
+ self.assertEqual(1, f(set([1, 2]), based_on=min))
+ self.assertEqual(2, f(set([1, 2]), based_on=max))
+
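+    # aggregate_values_from_db is expected to look up the host's aggregates
+    # with an elevated context and return the set of values stored under
+    # key_name, as asserted below.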
+ @mock.patch("nova.objects.aggregate.AggregateList.get_by_host")
+ def test_aggregate_values_from_db(self, get_by_host):
+ aggrA = mock.MagicMock()
+ aggrB = mock.MagicMock()
+ context = mock.MagicMock()
+
+ get_by_host.return_value = [aggrA, aggrB]
+ aggrA.metadata = {'k1': 1, 'k2': 2}
+ aggrB.metadata = {'k1': 3, 'k2': 4}
+
+ values = utils.aggregate_values_from_db(context, 'h1', key_name='k1')
+
+ self.assertTrue(context.elevated.called)
+ self.assertEqual(set([1, 3]), values)
diff --git a/nova/tests/unit/scheduler/test_host_filters.py b/nova/tests/unit/scheduler/test_host_filters.py
new file mode 100644
index 0000000000..caed938aa3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_filters.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack Foundation. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+from nova.scheduler import filters
+from nova.scheduler.filters import all_hosts_filter
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HostFiltersTestCase(test.NoDBTestCase):
+
+ def test_filter_handler(self):
+ # Double check at least a couple of known filters exist
+ filter_handler = filters.HostFilterHandler()
+ classes = filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.all_filters'])
+ self.assertIn(all_hosts_filter.AllHostsFilter, classes)
+ self.assertIn(compute_filter.ComputeFilter, classes)
+
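+    # AllHostsFilter is the no-op filter: host_passes() should return True
+    # for every host regardless of the filter properties.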
+ def test_all_host_filter(self):
+ filt_cls = all_hosts_filter.AllHostsFilter()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(filt_cls.host_passes(host, {}))
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
new file mode 100644
index 0000000000..b891baf7b4
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -0,0 +1,545 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For HostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova.i18n import _LW
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+from nova.virt import hardware
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class HostManagerTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
+ 'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [host_manager.HostState('fake_multihost',
+ 'fake-node%s' % x) for x in xrange(1, 5)]
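+        # self.fake_hosts now holds eight HostState objects: indices 0-3 are
+        # fake_host1..fake_host4 (each on a single 'fake-node'), and indices
+        # 4-7 are 'fake_multihost' with fake-node1..fake-node4. Later tests
+        # refer to hosts by these indices.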
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+        # Test that we return the one matching filter class.
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_classes), 1)
+ self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+        # Ensure ignore_hosts is processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+        # Ensure all nodes are returned for a host with many nodes.
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+        # Ensure that only overlapping hosts/nodes are returned when both
+        # force_hosts and force_nodes are given.
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_nodes and force_hosts yield no results.
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_all_host_states(self):
+
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ self.mox.StubOutWithMock(host_manager.LOG, 'warn')
+
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+        # node3 reports more physical disk space than the database expects
+ host_manager.LOG.warn(_LW("Host %(hostname)s has more disk space than "
+ "database expected (%(physical)sgb > "
+ "%(database)sgb)"),
+ {'physical': 3333, 'database': 3072,
+ 'hostname': 'node3'})
+ # Invalid service
+ host_manager.LOG.warn(_LW("No service for compute ID %s"), 5)
+
+ self.mox.ReplayAll()
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ # Check that .service is set properly
+ for i in xrange(4):
+ compute_node = fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
+ compute_node['service'])
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
+ # 511GB
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
+ # 1023GB
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
+ # 3071GB
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertThat(
+ hardware.VirtNUMAHostTopology.from_json(
+ host_states_map[('host3', 'node3')].numa_topology
+ )._to_dict(),
+ matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
+ # 8191GB
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
+
+
+class HostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [
+ host_manager.HostState('host1', 'node1'),
+ host_manager.HostState('host2', 'node2'),
+ host_manager.HostState('host3', 'node3'),
+ host_manager.HostState('host4', 'node4')
+ ]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 3)
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 0)
+
+
+class HostStateTestCase(test.NoDBTestCase):
+ """Test case for HostState class."""
+
+ # update_from_compute_node() and consume_from_instance() are tested
+ # in HostManagerTestCase.test_get_all_host_states()
+
+ def test_stat_consumption_from_compute_node(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_type='htype',
+ hypervisor_hostname='hostname', cpu_info='cpu_info',
+ supported_instances='{}',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertEqual('127.0.0.1', host.host_ip)
+ self.assertEqual('htype', host.hypervisor_type)
+ self.assertEqual('hostname', host.hypervisor_hostname)
+ self.assertEqual('cpu_info', host.cpu_info)
+ self.assertEqual({}, host.supported_instances)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_non_pci(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_rescue_unshelving(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.UNSHELVING: '1',
+ 'num_task_%s' % task_states.RESCUING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
+ def test_stat_consumption_from_instance(self, numa_usage_mock):
+ numa_usage_mock.return_value = 'fake-consumed-once'
+ host = host_manager.HostState("fakehost", "fakenode")
+
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.BUILDING,
+ task_state=task_states.SCHEDULING, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+ numa_usage_mock.assert_called_once_with(host, instance)
+ self.assertEqual('fake-consumed-once', host.numa_topology)
+
+ numa_usage_mock.return_value = 'fake-consumed-twice'
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.PAUSED,
+ task_state=None, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+
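+        # Only the first instance (vm_state BUILDING) is treated as an I/O
+        # intensive operation; the paused one is not, so num_io_ops ends at 1
+        # while num_instances reaches 2.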
+ self.assertEqual(2, host.num_instances)
+ self.assertEqual(1, host.num_io_ops)
+ self.assertEqual(2, numa_usage_mock.call_count)
+ self.assertEqual(((host, instance),), numa_usage_mock.call_args)
+ self.assertEqual('fake-consumed-twice', host.numa_topology)
+
+ def test_resources_consumption_from_compute_node(self):
+ metrics = [
+ dict(name='res1',
+ value=1.0,
+ source='source1',
+ timestamp=None),
+ dict(name='res2',
+ value="string2",
+ source='source2',
+ timestamp=None),
+ ]
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(metrics=jsonutils.dumps(metrics),
+ memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int,
+ numa_topology=fakes.NUMA_TOPOLOGY.to_json())
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(len(host.metrics), 2)
+ self.assertEqual(set(['res1', 'res2']), set(host.metrics.keys()))
+ self.assertEqual(1.0, host.metrics['res1'].value)
+ self.assertEqual('source1', host.metrics['res1'].source)
+ self.assertEqual('string2', host.metrics['res2'].value)
+ self.assertEqual('source2', host.metrics['res2'].source)
+ self.assertIsInstance(host.numa_topology, six.string_types)
diff --git a/nova/tests/unit/scheduler/test_ironic_host_manager.py b/nova/tests/unit/scheduler/test_ironic_host_manager.py
new file mode 100644
index 0000000000..50ec038cb3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_ironic_host_manager.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2014 OpenStack Foundation
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For IronicHostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import db
+from nova import exception
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova.scheduler import ironic_host_manager
+from nova import test
+from nova.tests.unit.scheduler import ironic_fakes
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class IronicHostManagerTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+
+ def test_manager_public_api_signatures(self):
+ self.assertPublicAPISignatures(host_manager.HostManager(),
+ self.host_manager)
+
+ def test_state_public_api_signatures(self):
+ self.assertPublicAPISignatures(
+ host_manager.HostState("dummy",
+ "dummy"),
+ ironic_host_manager.IronicNodeState("dummy",
+ "dummy")
+ )
+
+ def test_get_all_host_states(self):
+        # Ensure .service is set and we have the values we expect.
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ for i in range(4):
+ compute_node = ironic_fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(compute_node['service'],
+ host_states_map[state_key].service)
+ self.assertEqual(jsonutils.loads(compute_node['stats']),
+ host_states_map[state_key].stats)
+ self.assertEqual(compute_node['free_ram_mb'],
+ host_states_map[state_key].free_ram_mb)
+ self.assertEqual(compute_node['free_disk_gb'] * 1024,
+ host_states_map[state_key].free_disk_mb)
+
+
+class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ ironic_driver = "nova.virt.ironic.driver.IronicDriver"
+ supported_instances = '[["i386", "baremetal", "baremetal"]]'
+ self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(
+ ironic_driver=ironic_driver,
+ cpu_arch='i386')),
+ supported_instances=supported_instances,
+ free_disk_gb=10, free_ram_mb=1024,
+ hypervisor_type='ironic',
+                                 hypervisor_version=1,
+                                 hypervisor_hostname='fake_host')
+
+ @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
+ def test_create_ironic_node_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'baremetal cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
+
+ @mock.patch.object(host_manager.HostState, '__init__')
+ def test_create_non_ironic_host_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'other cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(host_manager.HostState, type(host_state))
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4uuid']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(3, len(host_states_map))
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(0, len(host_states_map))
+
+ def test_update_from_compute_node(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ self.assertEqual(1024, host.free_ram_mb)
+ self.assertEqual(1024, host.total_usable_ram_mb)
+ self.assertEqual(10240, host.free_disk_mb)
+ self.assertEqual(1, host.vcpus_total)
+ self.assertEqual(0, host.vcpus_used)
+ self.assertEqual(jsonutils.loads(self.compute_node['stats']),
+ host.stats)
+ self.assertEqual('ironic', host.hypervisor_type)
+ self.assertEqual(1, host.hypervisor_version)
+ self.assertEqual('fake_host', host.hypervisor_hostname)
+
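+    # Ironic nodes are consumed whole: whether the requested instance is
+    # identical to, larger than or smaller than the node, the three tests
+    # below expect all of the node's RAM and disk to be used up.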
+ def test_consume_identical_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_larger_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_smaller_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+
+class IronicHostManagerTestFilters(test.NoDBTestCase):
+ """Test filters work for IronicHostManager."""
+
+ def setUp(self):
+ super(IronicHostManagerTestFilters, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [ironic_host_manager.IronicNodeState(
+ 'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
+ self.fake_hosts += [ironic_host_manager.IronicNodeState(
+ 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+ # Test that we return the one correct filter class
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(1, len(filter_classes))
+ self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
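+ # Stub out filter selection so only FakeFilterClass1 runs, and record
+ # every host object and filter_properties dict it sees.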
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+ # Ensure ignore_hosts is processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+ # Ensure all nodes returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+ # Ensure only overlapping hosts are returned when both force_hosts
+ # and force_nodes are given
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+ # Ensure non-overlapping force_node and force_host yield no result
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
diff --git a/nova/tests/unit/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
new file mode 100644
index 0000000000..0ba0feb540
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.scheduler.rpcapi
+"""
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import test
+
+CONF = cfg.CONF
+
+
+class SchedulerRpcAPITestCase(test.NoDBTestCase):
+ def _test_scheduler_api(self, method, rpc_method, **kwargs):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = scheduler_rpcapi.SchedulerAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.scheduler_topic)
+
+ expected_retval = 'foo' if rpc_method == 'call' else None
+ expected_version = kwargs.pop('version', None)
+ expected_fanout = kwargs.pop('fanout', None)
+ expected_kwargs = kwargs.copy()
+
+ self.mox.StubOutWithMock(rpcapi, 'client')
+
+ rpcapi.client.can_send_version(
+ mox.IsA(str)).MultipleTimes().AndReturn(True)
+
+ prepare_kwargs = {}
+ if expected_fanout:
+ prepare_kwargs['fanout'] = True
+ if expected_version:
+ prepare_kwargs['version'] = expected_version
+ rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
+
+ rpc_method = getattr(rpcapi.client, rpc_method)
+
+ rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
+
+ self.mox.ReplayAll()
+
+ # NOTE(markmc): MultipleTimes() is OnceOrMore() not ZeroOrMore()
+ rpcapi.client.can_send_version('I fool you mox')
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, expected_retval)
+
+ def test_select_destinations(self):
+ self._test_scheduler_api('select_destinations', rpc_method='call',
+ request_spec='fake_request_spec',
+ filter_properties='fake_prop')
diff --git a/nova/tests/unit/scheduler/test_scheduler.py b/nova/tests/unit/scheduler/test_scheduler.py
new file mode 100644
index 0000000000..2435d60343
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler.py
@@ -0,0 +1,378 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler
+"""
+
+import mox
+from oslo.config import cfg
+
+from nova.compute import api as compute_api
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova import rpc
+from nova.scheduler import driver
+from nova.scheduler import manager
+from nova import servicegroup
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+class SchedulerManagerTestCase(test.NoDBTestCase):
+ """Test case for scheduler manager."""
+
+ manager_cls = manager.SchedulerManager
+ driver_cls = driver.Scheduler
+ driver_cls_name = 'nova.scheduler.driver.Scheduler'
+
+ def setUp(self):
+ super(SchedulerManagerTestCase, self).setUp()
+ self.flags(scheduler_driver=self.driver_cls_name)
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+ self.manager = self.manager_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.fake_args = (1, 2, 3)
+ self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def test_1_correct_init(self):
+ # Correct scheduler driver
+ manager = self.manager
+ self.assertIsInstance(manager.driver, self.driver_cls)
+
+ def _mox_schedule_method_helper(self, method_name):
+ # Make sure the driver method we're about to stub and call actually exists
+ def stub_method(*args, **kwargs):
+ pass
+
+ setattr(self.manager.driver, method_name, stub_method)
+
+ self.mox.StubOutWithMock(self.manager.driver,
+ method_name)
+
+ def test_run_instance_exception_puts_instance_in_error_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ inst = {"vm_state": "", "task_state": ""}
+
+ self._mox_schedule_method_helper('schedule_run_instance')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_properties': inst,
+ 'instance_uuids': [fake_instance_uuid]}
+
+ self.manager.driver.schedule_run_instance(self.context,
+ request_spec, None, None, None, None, {}, False).AndRaise(
+ exception.NoValidHost(reason=""))
+ old, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ERROR,
+ "task_state": None}).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context,
+ new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.run_instance(self.context, request_spec,
+ None, None, None, None, {}, False)
+
+ def test_prep_resize_no_valid_host_back_in_active_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
+ inst = {"vm_state": "", "task_state": ""}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_type': 'fake_type',
+ 'instance_uuids': [fake_instance_uuid],
+ 'instance_properties': {'uuid': fake_instance_uuid}}
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ exception.NoValidHost(reason=""))
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
+ (inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(**kwargs)
+
+ def test_prep_resize_no_valid_host_back_in_shutoff_state(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid, "vm_state": "stopped"}
+ inst = {"vm_state": "stopped", "task_state": ""}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {'instance_type': 'fake_type',
+ 'instance_uuids': [fake_instance_uuid],
+ 'instance_properties': {'uuid': fake_instance_uuid}}
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ exception.NoValidHost(reason=""))
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.STOPPED, "task_state": None}).AndReturn(
+ (inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(**kwargs)
+
+ def test_prep_resize_exception_host_in_error_state_and_raise(self):
+ fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
+
+ self._mox_schedule_method_helper('select_destinations')
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ request_spec = {
+ 'instance_properties': {'uuid': fake_instance_uuid},
+ 'instance_uuids': [fake_instance_uuid]
+ }
+ kwargs = {
+ 'context': self.context,
+ 'image': 'fake_image',
+ 'request_spec': request_spec,
+ 'filter_properties': 'fake_props',
+ 'instance': fake_instance,
+ 'instance_type': 'fake_type',
+ 'reservations': list('fake_res'),
+ }
+
+ self.manager.driver.select_destinations(
+ self.context, request_spec, 'fake_props').AndRaise(
+ test.TestingException('something happened'))
+
+ inst = {
+ "vm_state": "",
+ "task_state": "",
+ }
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
+ {"vm_state": vm_states.ERROR,
+ "task_state": None}).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(test.TestingException), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException, self.manager.prep_resize,
+ **kwargs)
+
+ def test_set_vm_state_and_notify_adds_instance_fault(self):
+ request = {'instance_properties': {'uuid': 'fake-uuid'}}
+ updates = {'vm_state': 'foo'}
+ fake_inst = {'uuid': 'fake-uuid'}
+
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier('scheduler').AndReturn(notifier)
+ db.instance_update_and_get_original(self.context, 'fake-uuid',
+ updates).AndReturn((None,
+ fake_inst))
+ db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ notifier.error(self.context, 'scheduler.foo', mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
+ self.context, None, request)
+
+ def test_prep_resize_post_populates_retry(self):
+ self.manager.driver = fakes.FakeFilterScheduler()
+
+ image = 'image'
+ instance_uuid = 'fake-instance-id'
+ instance = fake_instance.fake_db_instance(uuid=instance_uuid)
+
+ instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
+ instance_type = "m1.tiny"
+ request_spec = {'instance_properties': instance_properties,
+ 'instance_type': instance_type,
+ 'instance_uuids': [instance_uuid]}
+ retry = {'hosts': [], 'num_attempts': 1}
+ filter_properties = {'retry': retry}
+ reservations = None
+
+ hosts = [dict(host='host', nodename='node', limits={})]
+
+ self._mox_schedule_method_helper('select_destinations')
+ self.manager.driver.select_destinations(
+ self.context, request_spec, filter_properties).AndReturn(hosts)
+
+ self.mox.StubOutWithMock(self.manager.compute_rpcapi, 'prep_resize')
+ self.manager.compute_rpcapi.prep_resize(self.context, image,
+ mox.IsA(objects.Instance),
+ instance_type, 'host', reservations, request_spec=request_spec,
+ filter_properties=filter_properties, node='node')
+
+ self.mox.ReplayAll()
+ self.manager.prep_resize(self.context, image, request_spec,
+ filter_properties, instance, instance_type, reservations)
+
+ self.assertEqual([['host', 'node']],
+ filter_properties['retry']['hosts'])
+
+
+class SchedulerTestCase(test.NoDBTestCase):
+ """Test case for base scheduler driver class."""
+
+ # So we can subclass this test and re-use tests if we need.
+ driver_cls = driver.Scheduler
+
+ def setUp(self):
+ super(SchedulerTestCase, self).setUp()
+ self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+
+ def fake_show(meh, context, id, **kwargs):
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.image_service = glance.get_default_image_service()
+
+ self.driver = self.driver_cls()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.servicegroup_api = servicegroup.API()
+
+ def test_hosts_up(self):
+ service1 = {'host': 'host1'}
+ service2 = {'host': 'host2'}
+ services = [service1, service2]
+
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+
+ db.service_get_all_by_topic(self.context,
+ self.topic).AndReturn(services)
+ self.servicegroup_api.service_is_up(service1).AndReturn(False)
+ self.servicegroup_api.service_is_up(service2).AndReturn(True)
+
+ self.mox.ReplayAll()
+ result = self.driver.hosts_up(self.context, self.topic)
+ self.assertEqual(result, ['host2'])
+
+ def test_handle_schedule_error_adds_instance_fault(self):
+ instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ mox.IgnoreArg()).AndReturn(
+ (None, instance))
+ db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+ test_instance_fault.fake_faults['fake-uuid'][0])
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier('scheduler').AndReturn(notifier)
+ notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver.handle_schedule_error(self.context,
+ exception.NoValidHost('test'),
+ instance['uuid'], {})
+
+
+class SchedulerDriverBaseTestCase(SchedulerTestCase):
+ """Test cases for base scheduler driver class methods
+ that will fail if the driver is changed.
+ """
+
+ def test_unimplemented_schedule_run_instance(self):
+ fake_request_spec = {'instance_properties':
+ {'uuid': 'uuid'}}
+
+ self.assertRaises(NotImplementedError,
+ self.driver.schedule_run_instance,
+ self.context, fake_request_spec, None, None, None,
+ None, None, False)
+
+ def test_unimplemented_select_destinations(self):
+ self.assertRaises(NotImplementedError,
+ self.driver.select_destinations, self.context, {}, {})
+
+
+class SchedulerInstanceGroupData(test.TestCase):
+
+ driver_cls = driver.Scheduler
+
+ def setUp(self):
+ super(SchedulerInstanceGroupData, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.driver = self.driver_cls()
+
+ def _get_default_values(self):
+ return {'name': 'fake_name',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+
+ def _create_instance_group(self, context, values, policies=None,
+ metadata=None, members=None):
+ return db.instance_group_create(context, values, policies=policies,
+ metadata=metadata, members=members)
diff --git a/nova/tests/unit/scheduler/test_scheduler_options.py b/nova/tests/unit/scheduler/test_scheduler_options.py
new file mode 100644
index 0000000000..29d42ccd2f
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler_options.py
@@ -0,0 +1,138 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For SchedulerOptions.
+"""
+
+import datetime
+import StringIO
+
+from oslo.serialization import jsonutils
+
+from nova.scheduler import scheduler_options
+from nova import test
+
+
+class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
+ def __init__(self, last_checked, now, file_old, file_now, data, filedata):
+ super(FakeSchedulerOptions, self).__init__()
+ # Change internals ...
+ self.last_modified = file_old
+ self.last_checked = last_checked
+ self.data = data
+
+ # For overrides ...
+ self._time_now = now
+ self._file_now = file_now
+ self._file_data = filedata
+
+ self.file_was_loaded = False
+
+ def _get_file_timestamp(self, filename):
+ return self._file_now
+
+ def _get_file_handle(self, filename):
+ self.file_was_loaded = True
+ return StringIO.StringIO(self._file_data)
+
+ def _get_time_now(self):
+ return self._time_now
+
+
+class SchedulerOptionsTestCase(test.NoDBTestCase):
+ def test_get_configuration_first_time_no_flag(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual({}, fake.get_configuration())
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_empty_file(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ jdata = ""
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual({}, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_first_time_happy_day(self):
+ last_checked = None
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = None
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ {}, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_no_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+ data = dict(a=1, b=2, c=3)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ data, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_too_fast(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2011, 1, 1, 1, 1, 2)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEqual(old_data, fake.get_configuration('foo.json'))
+ self.assertFalse(fake.file_was_loaded)
+
+ def test_get_configuration_second_time_change(self):
+ last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+ now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+ file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+ old_data = dict(a=1, b=2, c=3)
+ data = dict(a=11, b=12, c=13)
+ jdata = jsonutils.dumps(data)
+
+ fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+ old_data, jdata)
+ self.assertEqual(data, fake.get_configuration('foo.json'))
+ self.assertTrue(fake.file_was_loaded)
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
new file mode 100644
index 0000000000..0dfade7deb
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Utils
+"""
+import contextlib
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import db
+from nova import exception
+from nova import notifications
+from nova import objects
+from nova import rpc
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+
+
+class SchedulerUtilsTestCase(test.NoDBTestCase):
+ """Test case for scheduler utils methods."""
+ def setUp(self):
+ super(SchedulerUtilsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ def test_build_request_spec_without_image(self):
+ image = None
+ instance = {'uuid': 'fake-uuid'}
+ instance_type = {'flavorid': 'fake-id'}
+
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
+ flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
+ db.flavor_extra_specs_get(self.context, mox.IgnoreArg()).AndReturn([])
+ self.mox.ReplayAll()
+
+ request_spec = scheduler_utils.build_request_spec(self.context, image,
+ [instance])
+ self.assertEqual({}, request_spec['image'])
+
+ @mock.patch.object(flavors, 'extract_flavor')
+ @mock.patch.object(db, 'flavor_extra_specs_get')
+ def test_build_request_spec_with_object(self, flavor_extra_specs_get,
+ extract_flavor):
+ instance_type = {'flavorid': 'fake-id'}
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ extract_flavor.return_value = instance_type
+ flavor_extra_specs_get.return_value = []
+
+ request_spec = scheduler_utils.build_request_spec(self.context, None,
+ [instance])
+ self.assertIsInstance(request_spec['instance_properties'], dict)
+
+ def _test_set_vm_state_and_notify(self, request_spec,
+ expected_uuids):
+ updates = dict(vm_state='fake-vm-state')
+ service = 'fake-service'
+ method = 'fake-method'
+ exc_info = 'exc_info'
+
+ self.mox.StubOutWithMock(compute_utils,
+ 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier(service).AndReturn(notifier)
+
+ old_ref = 'old_ref'
+ new_ref = 'new_ref'
+
+ for _uuid in expected_uuids:
+ db.instance_update_and_get_original(
+ self.context, _uuid, updates).AndReturn((old_ref, new_ref))
+ notifications.send_update(self.context, old_ref, new_ref,
+ service=service)
+ compute_utils.add_instance_fault_from_exc(
+ self.context,
+ new_ref, exc_info, mox.IsA(tuple))
+
+ payload = dict(request_spec=request_spec,
+ instance_properties=request_spec.get(
+ 'instance_properties', {}),
+ instance_id=_uuid,
+ state='fake-vm-state',
+ method=method,
+ reason=exc_info)
+ event_type = '%s.%s' % (service, method)
+ notifier.error(self.context, event_type, payload)
+
+ self.mox.ReplayAll()
+
+ scheduler_utils.set_vm_state_and_notify(self.context,
+ service,
+ method,
+ updates,
+ exc_info,
+ request_spec,
+ db)
+
+ def test_set_vm_state_and_notify_rs_uuids(self):
+ expected_uuids = ['1', '2', '3']
+ request_spec = dict(instance_uuids=expected_uuids)
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def test_set_vm_state_and_notify_uuid_from_instance_props(self):
+ expected_uuids = ['fake-uuid']
+ request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def _test_populate_filter_props(self, host_state_obj=True,
+ with_retry=True,
+ force_hosts=None,
+ force_nodes=None):
+ if force_hosts is None:
+ force_hosts = []
+ if force_nodes is None:
+ force_nodes = []
+ if with_retry:
+ if not force_hosts and not force_nodes:
+ filter_properties = dict(retry=dict(hosts=[]))
+ else:
+ filter_properties = dict(force_hosts=force_hosts,
+ force_nodes=force_nodes)
+ else:
+ filter_properties = dict()
+
+ if host_state_obj:
+ class host_state(object):
+ host = 'fake-host'
+ nodename = 'fake-node'
+ limits = 'fake-limits'
+ else:
+ host_state = dict(host='fake-host',
+ nodename='fake-node',
+ limits='fake-limits')
+
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+ if with_retry and not force_hosts and not force_nodes:
+ # Call a second time so we can check that two hosts are recorded
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
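+ # A forced host bypasses resource limit checks, so no limits should be
+ # recorded in filter_properties.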
+ if force_hosts:
+ expected_limits = None
+ else:
+ expected_limits = 'fake-limits'
+ self.assertEqual(expected_limits,
+ filter_properties.get('limits'))
+
+ if with_retry and not force_hosts and not force_nodes:
+ self.assertEqual([['fake-host', 'fake-node'],
+ ['fake-host', 'fake-node']],
+ filter_properties['retry']['hosts'])
+ else:
+ self.assertNotIn('retry', filter_properties)
+
+ def test_populate_filter_props(self):
+ self._test_populate_filter_props()
+
+ def test_populate_filter_props_host_dict(self):
+ self._test_populate_filter_props(host_state_obj=False)
+
+ def test_populate_filter_props_no_retry(self):
+ self._test_populate_filter_props(with_retry=False)
+
+ def test_populate_filter_props_force_hosts_no_retry(self):
+ self._test_populate_filter_props(force_hosts=['force-host'])
+
+ def test_populate_filter_props_force_nodes_no_retry(self):
+ self._test_populate_filter_props(force_nodes=['force-node'])
+
+ @mock.patch.object(scheduler_utils, '_max_attempts')
+ def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
+ _max_attempts.return_value = 2
+ msg = 'The exception text was preserved!'
+ filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
+ exc=[msg]))
+ nvh = self.assertRaises(exception.NoValidHost,
+ scheduler_utils.populate_retry,
+ filter_properties, 'fake-uuid')
+ # make sure 'msg' is a substring of the complete exception text
+ self.assertIn(msg, nvh.message)
+
+ def _check_parse_options(self, opts, sep, converter, expected):
+ good = scheduler_utils.parse_options(opts,
+ sep=sep,
+ converter=converter)
+ for item in expected:
+ self.assertIn(item, good)
+
+ def test_parse_options(self):
+ # check normal
+ self._check_parse_options(['foo=1', 'bar=-2.1'],
+ '=',
+ float,
+ [('foo', 1.0), ('bar', -2.1)])
+ # check convert error
+ self._check_parse_options(['foo=a1', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check separator missing
+ self._check_parse_options(['foo', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check key missing
+ self._check_parse_options(['=5', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+
+ def test_validate_filters_configured(self):
+ self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
+ self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
+
+ def _create_server_group(self, policy='anti-affinity'):
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.name = 'pele'
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+ return group
+
+ def _group_details_in_filter_properties(self, group, func='get_by_uuid',
+ hint=None, policy=None):
+ group_hint = hint
+ group_hosts = ['hostB']
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, func, return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ group_info = scheduler_utils.setup_instance_group(
+ self.context, group_hint, group_hosts)
+ self.assertEqual(
+ (set(['hostA', 'hostB']), [policy]),
+ group_info)
+
+ def test_group_details_in_filter_properties(self):
+ for policy in ['affinity', 'anti-affinity']:
+ group = self._create_server_group(policy)
+ self._group_details_in_filter_properties(group, func='get_by_uuid',
+ hint=group.uuid,
+ policy=policy)
+
+ def _group_filter_with_filter_not_configured(self, policy):
+ self.flags(scheduler_default_filters=['f1', 'f2'])
+
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
+ return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ self.assertRaises(exception.NoValidHost,
+ scheduler_utils.setup_instance_group,
+ self.context, group.uuid)
+
+ def test_group_filter_with_filter_not_configured(self):
+ policies = ['anti-affinity', 'affinity']
+ for policy in policies:
+ self._group_filter_with_filter_not_configured(policy)
+
+ def test_group_uuid_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_uuid',
+ group.uuid, 'anti-affinity')
+
+ def test_group_name_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_name',
+ group.name, 'anti-affinity')
diff --git a/nova/tests/unit/scheduler/test_weights.py b/nova/tests/unit/scheduler/test_weights.py
new file mode 100644
index 0000000000..5f168bf5df
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_weights.py
@@ -0,0 +1,338 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler weights.
+"""
+
+from oslo.serialization import jsonutils
+
+from nova import context
+from nova import exception
+from nova.openstack.common.fixture import mockpatch
+from nova.scheduler import weights
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+
+
+class TestWeighedHost(test.NoDBTestCase):
+ def test_dict_conversion(self):
+ host_state = fakes.FakeHostState('somehost', None, {})
+ host = weights.WeighedHost(host_state, 'someweight')
+ expected = {'weight': 'someweight',
+ 'host': 'somehost'}
+ self.assertThat(host.to_dict(), matchers.DictMatches(expected))
+
+ def test_all_weighers(self):
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertIn('RAMWeigher', class_names)
+ self.assertIn('MetricsWeigher', class_names)
+ self.assertIn('IoOpsWeigher', class_names)
+
+
+class RamWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(RamWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.ram.RAMWeigher'])
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_ram_filter_multiplier1(self):
+ self.flags(ram_weight_multiplier=0.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # With a zero multiplier every host has the same weight, so the
+ # winning host is arbitrary.
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(0.0, weighed_host.weight)
+
+ def test_ram_filter_multiplier2(self):
+ self.flags(ram_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * 2, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_ram_filter_negative(self):
+ self.flags(ram_weight_multiplier=1.0)
+ hostinfo_list = self._get_all_hosts()
+ host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
+ host_state = fakes.FakeHostState('negative', 'negative', host_attr)
+ hostinfo_list = list(hostinfo_list) + [host_state]
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+ # negativehost: free_ram_mb=-512
+
+ # so, host4 should win
+ weights = self.weight_handler.get_weighed_objects(self.weight_classes,
+ hostinfo_list, {})
+
+ weighed_host = weights[0]
+ self.assertEqual(1, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ # and negativehost should lose
+ weighed_host = weights[-1]
+ self.assertEqual(0, weighed_host.weight)
+ self.assertEqual('negative', weighed_host.obj.host)
+
+
+class MetricsWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(MetricsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES_METRICS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.metrics.MetricsWeigher'])
+
+ def _get_weighed_host(self, hosts, setting, weight_properties=None):
+ if not weight_properties:
+ weight_properties = {}
+ self.flags(weight_setting=setting, group='metrics')
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, settings, expected_weight, expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list, settings)
+ self.assertEqual(expected_weight, weighed_host.weight)
+ self.assertEqual(expected_host, weighed_host.obj.host)
+
+ def test_single_resource(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host4 should win:
+ setting = ['foo=1']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host2 should win:
+ setting = ['foo=0.0001', 'bar=1']
+ self._do_test(setting, 1.0, 'host2')
+
+ def test_single_resource_negative_ratio(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host1 should win:
+ setting = ['foo=-1']
+ self._do_test(setting, 1.0, 'host1')
+
+ def test_multiple_resource_missing_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource_wrong_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar = 2.0t']
+ self._do_test(setting, 1.0, 'host4')
+
+ def _check_parsing_result(self, weigher, setting, results):
+ self.flags(weight_setting=setting, group='metrics')
+ weigher._parse_setting()
+ self.assertEqual(len(weigher.setting), len(results))
+ for item in results:
+ self.assertIn(item, weigher.setting)
+
+ def test_parse_setting(self):
+ weigher = self.weight_classes[0]()
+ self._check_parsing_result(weigher,
+ ['foo=1'],
+ [('foo', 1.0)])
+ self._check_parsing_result(weigher,
+ ['foo=1', 'bar=-2.1'],
+ [('foo', 1.0), ('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo=a1', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['=5', 'bar=-2.1'],
+ [('bar', -2.1)])
+
+ def test_metric_not_found_required(self):
+ setting = ['foo=1', 'zot=2']
+ self.assertRaises(exception.ComputeHostMetricNotFound,
+ self._do_test,
+ setting,
+ 8192,
+ 'host4')
+
+ def test_metric_not_found_non_required(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # host5: foo=768, bar=0, zot=1
+ # host6: foo=2048, bar=0, zot=2
+ # so, host5 should win:
+ self.flags(required=False, group='metrics')
+ setting = ['foo=0.0001', 'zot=-1']
+ self._do_test(setting, 1.0, 'host5')
+
+
+COMPUTE_NODES_IO_OPS = [
+ # host1: num_io_ops=1
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=None, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '1'})),
+ # host2: num_io_ops=2
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '2'})),
+ # host3: num_io_ops=0, so host3 should win in the case of default
+ # io_ops_weight_multiplier configure.
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '0'})),
+ # host4: num_io_ops=4, so host4 should win in the case of positive
+ # io_ops_weight_multiplier configure.
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8888, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '4'})),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
+]
+
+
+class IoOpsWeigherTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IoOpsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=COMPUTE_NODES_IO_OPS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.io_ops.IoOpsWeigher'])
+
+ def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
+ if io_ops_weight_multiplier is not None:
+ self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, {})[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, io_ops_weight_multiplier, expected_weight,
+ expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list,
+ io_ops_weight_multiplier)
+ self.assertEqual(weighed_host.weight, expected_weight)
+ if expected_host:
+ self.assertEqual(weighed_host.obj.host, expected_host)
+
+ def test_io_ops_weight_multiplier_by_default(self):
+ self._do_test(io_ops_weight_multiplier=None,
+ expected_weight=0.0,
+ expected_host='host3')
+
+ def test_io_ops_weight_multiplier_zero_value(self):
+ # With a zero multiplier every host has the same weight, so the
+ # winning host is arbitrary.
+ self._do_test(io_ops_weight_multiplier=0.0,
+ expected_weight=0.0,
+ expected_host=None)
+
+ def test_io_ops_weight_multiplier_positive_value(self):
+ self._do_test(io_ops_weight_multiplier=2.0,
+ expected_weight=2.0,
+ expected_host='host4')
diff --git a/nova/tests/unit/servicegroup/__init__.py b/nova/tests/unit/servicegroup/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/servicegroup/__init__.py
diff --git a/nova/tests/unit/servicegroup/test_db_servicegroup.py b/nova/tests/unit/servicegroup/test_db_servicegroup.py
new file mode 100644
index 0000000000..1cb47a6ce4
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_db_servicegroup.py
@@ -0,0 +1,144 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import fixtures
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class DBServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(DBServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='db')
+ self.down_time = 15
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API()
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_DB_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ self.useFixture(test.TimeOverride())
+ timeutils.advance_time_seconds(self.down_time + 1)
+ self.servicegroup_api._driver._report_state(serv)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
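+ # Once the service stops reporting its state, advancing past the
+ # down time should mark it as down.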
+ serv.stop()
+ timeutils.advance_time_seconds(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ service_ref1 = db.service_get_by_args(self._ctx,
+ host1,
+ self._binary)
+ service_ref2 = db.service_get_by_args(self._ctx,
+ host2,
+ self._binary)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertIn(service_ref1['host'], services)
+ self.assertIn(service_ref2['host'], services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertIn(service_id, services)
+
+ def test_service_is_up(self):
+ fts_func = datetime.datetime.fromtimestamp
+ fake_now = 1000
+ down_time = 15
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow')
+ self.servicegroup_api = servicegroup.API()
+
+ # Up (equal)
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time),
+ 'created_at': fts_func(fake_now - self.down_time)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time + 1),
+ 'created_at': fts_func(fake_now - self.down_time + 1)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time - 3),
+ 'created_at': fts_func(fake_now - self.down_time - 3)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertFalse(result)
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
new file mode 100644
index 0000000000..b04d86de7d
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
+#
+# This is derived from test_db_servicegroup.py.
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class MemcachedServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MemcachedServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='mc')
+ self.down_time = 15
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API(test=True)
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_memcached_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ hostkey = str("%s:%s" % (self._topic, self._host))
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=self.down_time)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ self.useFixture(test.TimeOverride())
+ timeutils.advance_time_seconds(self.down_time + 1)
+ self.servicegroup_api._driver._report_state(serv)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ serv.stop()
+ timeutils.advance_time_seconds(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+ host3 = self._host + '_3'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ serv3 = self.useFixture(
+ ServiceFixture(host3, self._binary, self._topic)).serv
+ serv3.start()
+
+ db.service_get_by_args(self._ctx, host1, self._binary)
+ db.service_get_by_args(self._ctx, host2, self._binary)
+ db.service_get_by_args(self._ctx, host3, self._binary)
+
+ host1key = str("%s:%s" % (self._topic, host1))
+ host2key = str("%s:%s" % (self._topic, host2))
+ host3key = str("%s:%s" % (self._topic, host3))
+ self.servicegroup_api._driver.mc.set(host1key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host2key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host3key,
+ timeutils.utcnow(),
+ time=-1)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertIn(host1, services)
+ self.assertIn(host2, services)
+ self.assertNotIn(host3, services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertIn(service_id, services)
+
+ def test_service_is_up(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ fake_now = 1000
+ down_time = 15
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
+ self.servicegroup_api = servicegroup.API()
+ hostkey = str("%s:%s" % (self._topic, self._host))
+
+ # Up (equal)
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+
+ def test_report_state(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ db.service_get_by_args(self._ctx, self._host, self._binary)
+ self.servicegroup_api = servicegroup.API()
+
+ # updating model_disconnected
+ serv.model_disconnected = True
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertFalse(serv.model_disconnected)
+
+ # handling exception
+ serv.model_disconnected = True
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
+
+ delattr(serv, 'model_disconnected')
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
diff --git a/nova/tests/unit/servicegroup/test_zk_driver.py b/nova/tests/unit/servicegroup/test_zk_driver.py
new file mode 100644
index 0000000000..5a9f23f5e0
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_zk_driver.py
@@ -0,0 +1,65 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test the ZooKeeper driver for servicegroup.
+
+You need to install ZooKeeper locally and its related dependencies
+to run the test. It's unclear how to install the python-zookeeper lib
+in a venv, so you might have to run the test outside of one.
+
+To set up in Ubuntu 12.04:
+$ sudo apt-get install zookeeper zookeeperd python-zookeeper
+$ sudo pip install evzookeeper
+$ nosetests nova.tests.unit.servicegroup.test_zk_driver
+"""
+
+import eventlet
+
+from nova import servicegroup
+from nova import test
+
+
+class ZKServiceGroupTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ZKServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ from nova.servicegroup.drivers import zk
+ self.flags(servicegroup_driver='zk')
+ self.flags(address='localhost:2181', group="zookeeper")
+ try:
+ zk.ZooKeeperDriver()
+ except ImportError:
+ self.skipTest("Unable to test due to lack of ZooKeeper")
+
+ def test_join_leave(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ self.servicegroup_api.join(service_id['host'], service_id['topic'])
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ self.servicegroup_api.leave(service_id['host'], service_id['topic'])
+ # make sure zookeeper is updated and watcher is triggered
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
+
+ def test_stop(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ pulse = self.servicegroup_api.join(service_id['host'],
+ service_id['topic'], None)
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ pulse.stop()
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
diff --git a/nova/tests/unit/ssl_cert/ca.crt b/nova/tests/unit/ssl_cert/ca.crt
new file mode 100644
index 0000000000..9d66ca6270
--- /dev/null
+++ b/nova/tests/unit/ssl_cert/ca.crt
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg
+Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy
+MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV
+BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ
+R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi
+RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX
+/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI
+N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl
+GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If
+ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb
+tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+
+dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK
+WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/
+4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk
+BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID
+AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j
+BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx
+EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG
+A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM
+BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h
+UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4
+qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm
+2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/
++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX
+TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a
+NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V
+xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv
+ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy
+I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY
+9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA
+WoRMgEwjGJWqzhJZUYpUAQ==
+-----END CERTIFICATE-----
diff --git a/nova/tests/unit/ssl_cert/certificate.crt b/nova/tests/unit/ssl_cert/certificate.crt
new file mode 100644
index 0000000000..3c1aa6363b
--- /dev/null
+++ b/nova/tests/unit/ssl_cert/certificate.crt
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV
+BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ
+R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN
+MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0
+ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT
+BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu
+avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb
+Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ
+bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA
+BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q
+8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG
+/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0
+iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+
+KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2
+0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9
+Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr
+mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC
+AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y
+0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN
+rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k
+yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY
+vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc
+AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2
+KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL
+cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07
+hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2
+Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM
+YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA==
+-----END CERTIFICATE-----
diff --git a/nova/tests/unit/ssl_cert/privatekey.key b/nova/tests/unit/ssl_cert/privatekey.key
new file mode 100644
index 0000000000..b63df3d29d
--- /dev/null
+++ b/nova/tests/unit/ssl_cert/privatekey.key
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe
+4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny
+FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD
+/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K
+gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN
++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy
+QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH
+pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7
+rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS
+L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN
+H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA
+AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW
+t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N
+sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/
+8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1
+f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH
+Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r
+VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh
+/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR
+dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh
+WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw
+1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK
+hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM
+ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh
+sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o
+uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ
+LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U
+4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n
+bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc
+NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn
+7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp
+TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7
+3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL
+5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ
+fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze
+IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz
+JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p
+pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD
+bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB
+utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP
+pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ
+GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq
+ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps
+av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB
+1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX
+juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag
+miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS
+8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed
+TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ=
+-----END RSA PRIVATE KEY-----
diff --git a/nova/tests/unit/test_api_validation.py b/nova/tests/unit/test_api_validation.py
new file mode 100644
index 0000000000..bc694f4d70
--- /dev/null
+++ b/nova/tests/unit/test_api_validation.py
@@ -0,0 +1,872 @@
+# Copyright 2013 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from nova.api import validation
+from nova.api.validation import parameter_types
+from nova import exception
+from nova import test
+
+
+class APIValidationTestCase(test.TestCase):
+
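+    # Helper used by all of the test cases below: it invokes the decorated
+    # method, expects exception.ValidationError with an HTTP 400 code, and
+    # matches the error detail against expected_detail (treated as a regular
+    # expression first, then compared verbatim).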
+ def check_validation_error(self, method, body, expected_detail):
+ try:
+ method(body=body)
+ except exception.ValidationError as ex:
+ self.assertEqual(400, ex.kwargs['code'])
+ if not re.match(expected_detail, ex.kwargs['detail']):
+ self.assertEqual(expected_detail, ex.kwargs['detail'],
+ 'Exception details did not match expected')
+ except Exception as ex:
+            self.fail('An unexpected exception occurred: %s' % ex)
+ else:
+            self.fail('Expected a ValidationError, but no exception was '
+                      'raised.')
+
+
+class RequiredDisableTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(RequiredDisableTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'integer',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_required_disable(self):
+ self.assertEqual(self.post(body={'foo': 1}), 'Validation succeeded.')
+ self.assertEqual(self.post(body={'abc': 1}), 'Validation succeeded.')
+
+
+class RequiredEnableTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(RequiredEnableTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'integer',
+ },
+ },
+ 'required': ['foo']
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_required_enable(self):
+ self.assertEqual(self.post(body={'foo': 1}), 'Validation succeeded.')
+
+ def test_validate_required_enable_fails(self):
+ detail = "'foo' is a required property"
+ self.check_validation_error(self.post, body={'abc': 1},
+ expected_detail=detail)
+
+
+class AdditionalPropertiesEnableTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(AdditionalPropertiesEnableTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'integer',
+ },
+ },
+ 'required': ['foo'],
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_additionalProperties_enable(self):
+ self.assertEqual(self.post(body={'foo': 1}), 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': 1, 'ext': 1}),
+ 'Validation succeeded.')
+
+
+class AdditionalPropertiesDisableTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(AdditionalPropertiesDisableTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'integer',
+ },
+ },
+ 'required': ['foo'],
+ 'additionalProperties': False,
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_additionalProperties_disable(self):
+ self.assertEqual(self.post(body={'foo': 1}), 'Validation succeeded.')
+
+ def test_validate_additionalProperties_disable_fails(self):
+ detail = "Additional properties are not allowed ('ext' was unexpected)"
+ self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},
+ expected_detail=detail)
+
+
+class PatternPropertiesTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(PatternPropertiesTestCase, self).setUp()
+ schema = {
+ 'patternProperties': {
+ '^[a-zA-Z0-9]{1,10}$': {
+ 'type': 'string'
+ },
+ },
+ 'additionalProperties': False,
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_patternProperties(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'bar'}))
+
+ def test_validate_patternProperties_fails(self):
+ detail = "Additional properties are not allowed ('__' was unexpected)"
+ self.check_validation_error(self.post, body={'__': 'bar'},
+ expected_detail=detail)
+
+ detail = "Additional properties are not allowed ('' was unexpected)"
+ self.check_validation_error(self.post, body={'': 'bar'},
+ expected_detail=detail)
+
+ detail = ("Additional properties are not allowed ('0123456789a' was"
+ " unexpected)")
+ self.check_validation_error(self.post, body={'0123456789a': 'bar'},
+ expected_detail=detail)
+
+ detail = "expected string or buffer"
+ self.check_validation_error(self.post, body={None: 'bar'},
+ expected_detail=detail)
+
+
+class StringTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(StringTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_string(self):
+ self.assertEqual(self.post(body={'foo': 'abc'}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': '0'}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': ''}),
+ 'Validation succeeded.')
+
+ def test_validate_string_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: 1."
+ " 1 is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': 1},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1.5."
+ " 1.5 is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': 1.5},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: True."
+ " True is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': True},
+ expected_detail=detail)
+
+
+class StringLengthTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(StringLengthTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 10,
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_string_length(self):
+ self.assertEqual(self.post(body={'foo': '0'}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': '0123456789'}),
+ 'Validation succeeded.')
+
+ def test_validate_string_length_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: ."
+ " '' is too short")
+ self.check_validation_error(self.post, body={'foo': ''},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 0123456789a."
+ " '0123456789a' is too long")
+ self.check_validation_error(self.post, body={'foo': '0123456789a'},
+ expected_detail=detail)
+
+
+class IntegerTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(IntegerTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': ['integer', 'string'],
+ 'pattern': '^[0-9]+$',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_integer(self):
+ self.assertEqual(self.post(body={'foo': 1}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': '1'}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': '0123456789'}),
+ 'Validation succeeded.')
+
+ def test_validate_integer_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: abc."
+ " 'abc' does not match '^[0-9]+$'")
+ self.check_validation_error(self.post, body={'foo': 'abc'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: True."
+ " True is not of type 'integer', 'string'")
+ self.check_validation_error(self.post, body={'foo': True},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 0xffff."
+ " '0xffff' does not match '^[0-9]+$'")
+ self.check_validation_error(self.post, body={'foo': '0xffff'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1.0."
+ " 1.0 is not of type 'integer', 'string'")
+ self.check_validation_error(self.post, body={'foo': 1.0},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1.0."
+ " '1.0' does not match '^[0-9]+$'")
+ self.check_validation_error(self.post, body={'foo': '1.0'},
+ expected_detail=detail)
+
+
+class IntegerRangeTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(IntegerRangeTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': ['integer', 'string'],
+ 'pattern': '^[0-9]+$',
+ 'minimum': 1,
+ 'maximum': 10,
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_integer_range(self):
+ self.assertEqual(self.post(body={'foo': 1}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': 10}),
+ 'Validation succeeded.')
+ self.assertEqual(self.post(body={'foo': '1'}),
+ 'Validation succeeded.')
+
+ def test_validate_integer_range_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: 0."
+ " 0(.0)? is less than the minimum of 1")
+ self.check_validation_error(self.post, body={'foo': 0},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 11."
+ " 11(.0)? is greater than the maximum of 10")
+ self.check_validation_error(self.post, body={'foo': 11},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 0."
+ " 0(.0)? is less than the minimum of 1")
+ self.check_validation_error(self.post, body={'foo': '0'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 11."
+ " 11(.0)? is greater than the maximum of 10")
+ self.check_validation_error(self.post, body={'foo': '11'},
+ expected_detail=detail)
+
+
+class BooleanTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(BooleanTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': parameter_types.boolean,
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_boolean(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': True}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': False}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'True'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'False'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': '1'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': '0'}))
+
+ def test_validate_boolean_fails(self):
+ enum_boolean = ("[True, 'True', 'TRUE', 'true', '1',"
+ " False, 'False', 'FALSE', 'false', '0']")
+
+ detail = ("Invalid input for field/attribute foo. Value: bar."
+ " 'bar' is not one of %s") % enum_boolean
+ self.check_validation_error(self.post, body={'foo': 'bar'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 2."
+ " '2' is not one of %s") % enum_boolean
+ self.check_validation_error(self.post, body={'foo': '2'},
+ expected_detail=detail)
+
+
+class HostnameTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(HostnameTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': parameter_types.hostname,
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_hostname(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'localhost'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'localhost.localdomain.com'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'my-host'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'my_host'}))
+
+ def test_validate_hostname_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: True."
+ " True is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': True},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1."
+ " 1 is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': 1},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: my$host."
+ " 'my$host' does not match '^[a-zA-Z0-9-._]*$'")
+ self.check_validation_error(self.post, body={'foo': 'my$host'},
+ expected_detail=detail)
+
+
+class HostnameIPaddressTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(HostnameIPaddressTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': parameter_types.hostname_or_ip_address,
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_hostname_or_ip_address(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'localhost'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'localhost.localdomain.com'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'my-host'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'my_host'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': '192.168.10.100'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': '2001:db8::9abc'}))
+
+ def test_validate_hostname_or_ip_address_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: True."
+ " True is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': True},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1."
+ " 1 is not of type 'string'")
+ self.check_validation_error(self.post, body={'foo': 1},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: my$host."
+ " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'")
+ self.check_validation_error(self.post, body={'foo': 'my$host'},
+ expected_detail=detail)
+
+
+class NameTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(NameTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': parameter_types.name,
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_name(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'm1.small'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'my server'}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'a'}))
+
+ def test_validate_name_fails(self):
+ pattern = "'^(?! )[a-zA-Z0-9. _-]*(?<! )$'"
+ detail = ("Invalid input for field/attribute foo. Value: ."
+ " ' ' does not match %s") % pattern
+ self.check_validation_error(self.post, body={'foo': ' '},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: server."
+ " ' server' does not match %s") % pattern
+ self.check_validation_error(self.post, body={'foo': ' server'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: server ."
+ " 'server ' does not match %s") % pattern
+ self.check_validation_error(self.post, body={'foo': 'server '},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: a."
+ " ' a' does not match %s") % pattern
+ self.check_validation_error(self.post, body={'foo': ' a'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: a ."
+ " 'a ' does not match %s") % pattern
+ self.check_validation_error(self.post, body={'foo': 'a '},
+ expected_detail=detail)
+
+
+class TcpUdpPortTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(TcpUdpPortTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': parameter_types.tcp_udp_port,
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_tcp_udp_port(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 1024}))
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': '1024'}))
+
+ def test_validate_tcp_udp_port_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: True."
+ " True is not of type 'integer', 'string'")
+ self.check_validation_error(self.post, body={'foo': True},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 65536."
+ " 65536(.0)? is greater than the maximum of 65535")
+ self.check_validation_error(self.post, body={'foo': 65536},
+ expected_detail=detail)
+
+
+class DatetimeTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(DatetimeTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'date-time',
+ },
+ },
+ }
+
+        @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_datetime(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': '2014-01-14T01:00:00Z'}
+ ))
+
+ def test_validate_datetime_fails(self):
+ detail = ("Invalid input for field/attribute foo."
+ " Value: 2014-13-14T01:00:00Z."
+ " '2014-13-14T01:00:00Z' is not a 'date-time'")
+ self.check_validation_error(self.post,
+ body={'foo': '2014-13-14T01:00:00Z'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo."
+ " Value: bar. 'bar' is not a 'date-time'")
+ self.check_validation_error(self.post, body={'foo': 'bar'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1."
+ " '1' is not a 'date-time'")
+ self.check_validation_error(self.post, body={'foo': '1'},
+ expected_detail=detail)
+
+
+class UuidTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(UuidTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'uuid',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_uuid(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'}
+ ))
+
+ def test_validate_uuid_fails(self):
+ detail = ("Invalid input for field/attribute foo."
+ " Value: 70a599e031e749b7b260868f441e862b."
+ " '70a599e031e749b7b260868f441e862b' is not a 'uuid'")
+ self.check_validation_error(self.post,
+ body={'foo': '70a599e031e749b7b260868f441e862b'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: 1."
+ " '1' is not a 'uuid'")
+ self.check_validation_error(self.post, body={'foo': '1'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: abc."
+ " 'abc' is not a 'uuid'")
+ self.check_validation_error(self.post, body={'foo': 'abc'},
+ expected_detail=detail)
+
+
+class UriTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(UriTestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'uri',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_uri(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': 'http://localhost:8774/v2/servers'}
+ ))
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': 'http://[::1]:8774/v2/servers'}
+ ))
+
+ def test_validate_uri_fails(self):
+ base_detail = ("Invalid input for field/attribute foo. Value: {0}. "
+ "'{0}' is not a 'uri'")
+ invalid_uri = 'http://localhost:8774/v2/servers##'
+ self.check_validation_error(self.post,
+ body={'foo': invalid_uri},
+ expected_detail=base_detail.format(
+ invalid_uri))
+
+ invalid_uri = 'http://[fdf8:01]:8774/v2/servers'
+ self.check_validation_error(self.post,
+ body={'foo': invalid_uri},
+ expected_detail=base_detail.format(
+ invalid_uri))
+
+ invalid_uri = '1'
+ self.check_validation_error(self.post,
+ body={'foo': invalid_uri},
+ expected_detail=base_detail.format(
+ invalid_uri))
+
+ invalid_uri = 'abc'
+ self.check_validation_error(self.post,
+ body={'foo': invalid_uri},
+ expected_detail=base_detail.format(
+ invalid_uri))
+
+
+class Ipv4TestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(Ipv4TestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'ipv4',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_ipv4(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': '192.168.0.100'}
+ ))
+
+ def test_validate_ipv4_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: abc."
+ " 'abc' is not a 'ipv4'")
+ self.check_validation_error(self.post, body={'foo': 'abc'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: localhost."
+ " 'localhost' is not a 'ipv4'")
+ self.check_validation_error(self.post, body={'foo': 'localhost'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo."
+ " Value: 2001:db8::1234:0:0:9abc."
+ " '2001:db8::1234:0:0:9abc' is not a 'ipv4'")
+ self.check_validation_error(self.post,
+ body={'foo': '2001:db8::1234:0:0:9abc'},
+ expected_detail=detail)
+
+
+class Ipv6TestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(Ipv6TestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'ipv6',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_ipv6(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(
+ body={'foo': '2001:db8::1234:0:0:9abc'}
+ ))
+
+ def test_validate_ipv6_fails(self):
+ detail = ("Invalid input for field/attribute foo. Value: abc."
+ " 'abc' is not a 'ipv6'")
+ self.check_validation_error(self.post, body={'foo': 'abc'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo. Value: localhost."
+ " 'localhost' is not a 'ipv6'")
+ self.check_validation_error(self.post, body={'foo': 'localhost'},
+ expected_detail=detail)
+
+ detail = ("Invalid input for field/attribute foo."
+ " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'")
+ self.check_validation_error(self.post, body={'foo': '192.168.0.100'},
+ expected_detail=detail)
+
+
+class Base64TestCase(APIValidationTestCase):
+
+ def setUp(self):
+        super(Base64TestCase, self).setUp()
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'foo': {
+ 'type': 'string',
+ 'format': 'base64',
+ },
+ },
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_base64(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'aGVsbG8gd29ybGQ='}))
+        # 'aGVsbG8gd29ybGQ=' is the base64 encoding of 'hello world'
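+        # For reference, the value above can be reproduced with the standard
+        # library (a quick sketch, not part of the test):
+        #   import base64
+        #   base64.b64encode('hello world')  # -> 'aGVsbG8gd29ybGQ='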
+
+ def test_validate_base64_fails(self):
+ value = 'A random string'
+ detail = ("Invalid input for field/attribute foo. "
+ "Value: %s. '%s' is not a 'base64'") % (value, value)
+ self.check_validation_error(self.post, body={'foo': value},
+ expected_detail=detail)
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
new file mode 100644
index 0000000000..2066a8f370
--- /dev/null
+++ b/nova/tests/unit/test_availability_zones.py
@@ -0,0 +1,255 @@
+# Copyright 2013 Netease Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for availability zones
+"""
+
+from oslo.config import cfg
+
+from nova import availability_zones as az
+from nova import context
+from nova import db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('internal_service_availability_zone',
+ 'nova.availability_zones')
+CONF.import_opt('default_availability_zone',
+ 'nova.availability_zones')
+
+
+class AvailabilityZoneTestCases(test.TestCase):
+ """Test case for aggregate based availability zone."""
+
+ def setUp(self):
+ super(AvailabilityZoneTestCases, self).setUp()
+ self.host = 'me'
+ self.availability_zone = 'nova-test'
+ self.default_az = CONF.default_availability_zone
+ self.default_in_az = CONF.internal_service_availability_zone
+ self.context = context.get_admin_context()
+ self.agg = self._create_az('az_agg', self.availability_zone)
+
+ def tearDown(self):
+ db.aggregate_delete(self.context, self.agg['id'])
+ super(AvailabilityZoneTestCases, self).tearDown()
+
+ def _create_az(self, agg_name, az_name):
+ agg_meta = {'name': agg_name}
+ agg = db.aggregate_create(self.context, agg_meta)
+
+ metadata = {'availability_zone': az_name}
+ db.aggregate_metadata_add(self.context, agg['id'], metadata)
+
+ return agg
+
+ def _update_az(self, aggregate, az_name):
+ metadata = {'availability_zone': az_name}
+ db.aggregate_update(self.context, aggregate['id'], metadata)
+
+ def _create_service_with_topic(self, topic, host, disabled=False):
+ values = {
+ 'binary': 'bin',
+ 'host': host,
+ 'topic': topic,
+ 'disabled': disabled,
+ }
+ return db.service_create(self.context, values)
+
+ def _destroy_service(self, service):
+ return db.service_destroy(self.context, service['id'])
+
+ def _add_to_aggregate(self, service, aggregate):
+ return db.aggregate_host_add(self.context,
+ aggregate['id'], service['host'])
+
+ def _delete_from_aggregate(self, service, aggregate):
+ return db.aggregate_host_delete(self.context,
+ aggregate['id'], service['host'])
+
+ def test_rest_availability_zone_reset_cache(self):
+ az._get_cache().add('cache', 'fake_value')
+ az.reset_cache()
+ self.assertIsNone(az._get_cache().get('cache'))
+
+ def test_update_host_availability_zone_cache(self):
+ """Test availability zone cache could be update."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ cache_key = az._make_cache_key(self.host)
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ az.update_host_availability_zone_cache(self.context, self.host)
+ self.assertEqual(az._get_cache().get(cache_key), 'az1')
+ az.update_host_availability_zone_cache(self.context, self.host, 'az2')
+ self.assertEqual(az._get_cache().get(cache_key), 'az2')
+
+ def test_set_availability_zone_compute_service(self):
+ """Test for compute service get right availability zone."""
+ service = self._create_service_with_topic('compute', self.host)
+ services = db.service_get_all(self.context)
+
+        # The service has not been added to an aggregate, so confirm it is
+        # in the default availability zone.
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.default_az)
+
+        # The service has been added to the aggregate, so confirm that the
+        # aggregate availability zone is returned.
+ self._add_to_aggregate(service, self.agg)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.availability_zone)
+
+ self._destroy_service(service)
+
+ def test_set_availability_zone_unicode_key(self):
+ """Test set availability zone cache key is unicode."""
+ service = self._create_service_with_topic('network', self.host)
+ services = db.service_get_all(self.context)
+ az.set_availability_zones(self.context, services)
+ self.assertIsInstance(services[0]['host'], unicode)
+ cached_key = az._make_cache_key(services[0]['host'])
+ self.assertIsInstance(cached_key, str)
+ self._destroy_service(service)
+
+ def test_set_availability_zone_not_compute_service(self):
+ """Test not compute service get right availability zone."""
+ service = self._create_service_with_topic('network', self.host)
+ services = db.service_get_all(self.context)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.default_in_az)
+ self._destroy_service(service)
+
+ def test_get_host_availability_zone(self):
+ """Test get right availability zone by given host."""
+ self.assertEqual(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
+ service = self._create_service_with_topic('compute', self.host)
+ self._add_to_aggregate(service, self.agg)
+
+ self.assertEqual(self.availability_zone,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_update_host_availability_zone(self):
+ """Test availability zone could be update by given host."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEqual(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Update AZ
+ new_az_name = 'az2'
+ self._update_az(agg_az1, new_az_name)
+ self.assertEqual(new_az_name,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_delete_host_availability_zone(self):
+ """Test availability zone could be deleted successfully."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEqual(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Delete the AZ via deleting the aggregate
+ self._delete_from_aggregate(service, agg_az1)
+ self.assertEqual(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_get_availability_zones(self):
+ """Test get_availability_zones."""
+
+        # When the get_only_available parameter of get_availability_zones is
+        # left at its default of False, it returns two lists: zones with at
+        # least one enabled service, and zones with no enabled services.
+        # When get_only_available is set to True, only a single list of zones
+        # with at least one enabled service is returned.
+ # Use the following test data:
+ #
+ # zone host enabled
+ # nova-test host1 Yes
+ # nova-test host2 No
+ # nova-test2 host3 Yes
+ # nova-test3 host4 No
+ # <default> host5 No
+
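+        # A brief sketch of the calls exercised below (argument and return
+        # shapes are taken from the assertions in this test, not from
+        # separate API documentation):
+        #   zones, not_zones = az.get_availability_zones(self.context)
+        #   available_zones = az.get_availability_zones(self.context, True)
+        #   zones_with_hosts, _ = az.get_availability_zones(
+        #       self.context, with_hosts=True)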
+ agg2 = self._create_az('agg-az2', 'nova-test2')
+ agg3 = self._create_az('agg-az3', 'nova-test3')
+
+ service1 = self._create_service_with_topic('compute', 'host1',
+ disabled=False)
+ service2 = self._create_service_with_topic('compute', 'host2',
+ disabled=True)
+ service3 = self._create_service_with_topic('compute', 'host3',
+ disabled=False)
+ service4 = self._create_service_with_topic('compute', 'host4',
+ disabled=True)
+ self._create_service_with_topic('compute', 'host5',
+ disabled=True)
+
+ self._add_to_aggregate(service1, self.agg)
+ self._add_to_aggregate(service2, self.agg)
+ self._add_to_aggregate(service3, agg2)
+ self._add_to_aggregate(service4, agg3)
+
+ zones, not_zones = az.get_availability_zones(self.context)
+
+ self.assertEqual(zones, ['nova-test', 'nova-test2'])
+ self.assertEqual(not_zones, ['nova-test3', 'nova'])
+
+ zones = az.get_availability_zones(self.context, True)
+
+ self.assertEqual(zones, ['nova-test', 'nova-test2'])
+
+ zones, not_zones = az.get_availability_zones(self.context,
+ with_hosts=True)
+
+ self.assertEqual(zones, [(u'nova-test2', set([u'host3'])),
+ (u'nova-test', set([u'host1']))])
+ self.assertEqual(not_zones, [(u'nova-test3', set([u'host4'])),
+ (u'nova', set([u'host5']))])
+
+ def test_get_instance_availability_zone_default_value(self):
+ """Test get right availability zone by given an instance."""
+ fake_inst_id = 162
+ fake_inst = fakes.stub_instance(fake_inst_id, host=self.host)
+
+ self.assertEqual(self.default_az,
+ az.get_instance_availability_zone(self.context, fake_inst))
+
+ def test_get_instance_availability_zone_from_aggregate(self):
+ """Test get availability zone from aggregate by given an instance."""
+ host = 'host170'
+ service = self._create_service_with_topic('compute', host)
+ self._add_to_aggregate(service, self.agg)
+
+ fake_inst_id = 174
+ fake_inst = fakes.stub_instance(fake_inst_id, host=host)
+
+ self.assertEqual(self.availability_zone,
+ az.get_instance_availability_zone(self.context, fake_inst))
diff --git a/nova/tests/unit/test_baserpc.py b/nova/tests/unit/test_baserpc.py
new file mode 100644
index 0000000000..d9013fb99e
--- /dev/null
+++ b/nova/tests/unit/test_baserpc.py
@@ -0,0 +1,50 @@
+#
+# Copyright 2013 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Test the base rpc API.
+"""
+
+from oslo.config import cfg
+
+from nova import baserpc
+from nova import context
+from nova import test
+
+CONF = cfg.CONF
+
+
+class BaseAPITestCase(test.TestCase):
+
+ def setUp(self):
+ super(BaseAPITestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+ self.conductor = self.start_service(
+ 'conductor', manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.base_rpcapi = baserpc.BaseAPI(CONF.compute_topic)
+
+ def test_ping(self):
+ res = self.base_rpcapi.ping(self.context, 'foo')
+ self.assertEqual(res, {'service': 'compute', 'arg': 'foo'})
+
+ def test_get_backdoor_port(self):
+ res = self.base_rpcapi.get_backdoor_port(self.context,
+ self.compute.host)
+ self.assertEqual(res, self.compute.backdoor_port)
diff --git a/nova/tests/unit/test_bdm.py b/nova/tests/unit/test_bdm.py
new file mode 100644
index 0000000000..52a0ca45ef
--- /dev/null
+++ b/nova/tests/unit/test_bdm.py
@@ -0,0 +1,248 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for Block Device Mapping Code.
+"""
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova import test
+from nova.tests.unit import matchers
+
+
+class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
+ """Test Case for Block Device Mapping."""
+
+ def fake_ec2_vol_id_to_uuid(obj, ec2_id):
+ if ec2_id == 'vol-87654321':
+ return '22222222-3333-4444-5555-666666666666'
+ elif ec2_id == 'vol-98765432':
+ return '77777777-8888-9999-0000-aaaaaaaaaaaa'
+ else:
+ return 'OhNoooo'
+
+ def fake_ec2_snap_id_to_uuid(obj, ec2_id):
+ if ec2_id == 'snap-12345678':
+ return '00000000-1111-2222-3333-444444444444'
+ elif ec2_id == 'snap-23456789':
+ return '11111111-2222-3333-4444-555555555555'
+ else:
+ return 'OhNoooo'
+
+ def _assertApply(self, action, bdm_list):
+ for bdm, expected_result in bdm_list:
+ self.assertThat(action(bdm), matchers.DictMatches(expected_result))
+
+ def test_parse_block_device_mapping(self):
+ self.stubs.Set(ec2utils,
+ 'ec2_vol_id_to_uuid',
+ self.fake_ec2_vol_id_to_uuid)
+ self.stubs.Set(ec2utils,
+ 'ec2_snap_id_to_uuid',
+ self.fake_ec2_snap_id_to_uuid)
+ bdm_list = [
+ ({'device_name': '/dev/fake0',
+ 'ebs': {'snapshot_id': 'snap-12345678',
+ 'volume_size': 1}},
+ {'device_name': '/dev/fake0',
+ 'snapshot_id': '00000000-1111-2222-3333-444444444444',
+ 'volume_size': 1,
+ 'delete_on_termination': True}),
+
+ ({'device_name': '/dev/fake1',
+ 'ebs': {'snapshot_id': 'snap-23456789',
+ 'delete_on_termination': False}},
+ {'device_name': '/dev/fake1',
+ 'snapshot_id': '11111111-2222-3333-4444-555555555555',
+ 'delete_on_termination': False}),
+
+ ({'device_name': '/dev/fake2',
+ 'ebs': {'snapshot_id': 'vol-87654321',
+ 'volume_size': 2}},
+ {'device_name': '/dev/fake2',
+ 'volume_id': '22222222-3333-4444-5555-666666666666',
+ 'volume_size': 2,
+ 'delete_on_termination': True}),
+
+ ({'device_name': '/dev/fake3',
+ 'ebs': {'snapshot_id': 'vol-98765432',
+ 'delete_on_termination': False}},
+ {'device_name': '/dev/fake3',
+ 'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
+ 'delete_on_termination': False}),
+
+ ({'device_name': '/dev/fake4',
+ 'ebs': {'no_device': True}},
+ {'device_name': '/dev/fake4',
+ 'no_device': True}),
+
+ ({'device_name': '/dev/fake5',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/fake5',
+ 'virtual_name': 'ephemeral0'}),
+
+ ({'device_name': '/dev/fake6',
+ 'virtual_name': 'swap'},
+ {'device_name': '/dev/fake6',
+ 'virtual_name': 'swap'}),
+ ]
+ self._assertApply(cloud._parse_block_device_mapping, bdm_list)
+
+ def test_format_block_device_mapping(self):
+ bdm_list = [
+ ({'device_name': '/dev/fake0',
+ 'snapshot_id': 0x12345678,
+ 'volume_size': 1,
+ 'delete_on_termination': True},
+ {'deviceName': '/dev/fake0',
+ 'ebs': {'snapshotId': 'snap-12345678',
+ 'volumeSize': 1,
+ 'deleteOnTermination': True}}),
+
+ ({'device_name': '/dev/fake1',
+ 'snapshot_id': 0x23456789},
+ {'deviceName': '/dev/fake1',
+ 'ebs': {'snapshotId': 'snap-23456789'}}),
+
+ ({'device_name': '/dev/fake2',
+ 'snapshot_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'deviceName': '/dev/fake2',
+ 'ebs': {'snapshotId': 'snap-23456789',
+ 'deleteOnTermination': False}}),
+
+ ({'device_name': '/dev/fake3',
+ 'volume_id': 0x12345678,
+ 'volume_size': 1,
+ 'delete_on_termination': True},
+ {'deviceName': '/dev/fake3',
+ 'ebs': {'snapshotId': 'vol-12345678',
+ 'volumeSize': 1,
+ 'deleteOnTermination': True}}),
+
+ ({'device_name': '/dev/fake4',
+ 'volume_id': 0x23456789},
+ {'deviceName': '/dev/fake4',
+ 'ebs': {'snapshotId': 'vol-23456789'}}),
+
+ ({'device_name': '/dev/fake5',
+ 'volume_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'deviceName': '/dev/fake5',
+ 'ebs': {'snapshotId': 'vol-23456789',
+ 'deleteOnTermination': False}}),
+ ]
+ self._assertApply(cloud._format_block_device_mapping, bdm_list)
+
+ def test_format_mapping(self):
+ properties = {
+ 'mappings': [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': 'sdb1'},
+ {'virtual': 'swap',
+ 'device': 'sdb2'},
+ {'virtual': 'swap',
+ 'device': 'sdb3'},
+ {'virtual': 'swap',
+ 'device': 'sdb4'},
+
+ {'virtual': 'ephemeral0',
+ 'device': 'sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': 'sdc2'},
+ {'virtual': 'ephemeral2',
+ 'device': 'sdc3'},
+ ],
+
+ 'block_device_mapping': [
+ # root
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 0x12345678,
+ 'delete_on_termination': False},
+
+ # overwrite swap
+ {'device_name': '/dev/sdb2',
+ 'snapshot_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'snapshot_id': 0x3456789A},
+ {'device_name': '/dev/sdb4',
+ 'no_device': True},
+
+ # overwrite ephemeral
+ {'device_name': '/dev/sdc2',
+ 'snapshot_id': 0x3456789A,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc3',
+ 'snapshot_id': 0x456789AB},
+ {'device_name': '/dev/sdc4',
+ 'no_device': True},
+
+ # volume
+ {'device_name': '/dev/sdd1',
+ 'snapshot_id': 0x87654321,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdd2',
+ 'snapshot_id': 0x98765432},
+ {'device_name': '/dev/sdd3',
+ 'snapshot_id': 0xA9875463},
+ {'device_name': '/dev/sdd4',
+ 'no_device': True}]}
+
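+ # Mappings flagged with 'no_device' (sdb4, sdc4, sdd4) should be dropped
+ # from the output, while swap/ephemeral devices not overridden by a BDM
+ # (sdb1, sdc1) keep their plain virtualName entries.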
+ expected_result = {
+ 'blockDeviceMapping': [
+ # root
+ {'deviceName': '/dev/sda1',
+ 'ebs': {'snapshotId': 'snap-12345678',
+ 'deleteOnTermination': False}},
+
+ # swap
+ {'deviceName': '/dev/sdb1',
+ 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdb2',
+ 'ebs': {'snapshotId': 'snap-23456789',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdb3',
+ 'ebs': {'snapshotId': 'snap-3456789a'}},
+
+ # ephemeral
+ {'deviceName': '/dev/sdc1',
+ 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdc2',
+ 'ebs': {'snapshotId': 'snap-3456789a',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdc3',
+ 'ebs': {'snapshotId': 'snap-456789ab'}},
+
+ # volume
+ {'deviceName': '/dev/sdd1',
+ 'ebs': {'snapshotId': 'snap-87654321',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdd2',
+ 'ebs': {'snapshotId': 'snap-98765432'}},
+ {'deviceName': '/dev/sdd3',
+ 'ebs': {'snapshotId': 'snap-a9875463'}}]}
+
+ result = {}
+ cloud._format_mappings(properties, result)
+ # The formatter does not guarantee ordering, so compare sorted copies.
+ self.assertEqual(sorted(result['blockDeviceMapping'],
+ key=lambda m: m['deviceName']),
+ sorted(expected_result['blockDeviceMapping'],
+ key=lambda m: m['deviceName']))
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
new file mode 100644
index 0000000000..2dff327e88
--- /dev/null
+++ b/nova/tests/unit/test_block_device.py
@@ -0,0 +1,604 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for Block Device utility functions.
+"""
+
+from nova import block_device
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import matchers
+
+
+class BlockDeviceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(BlockDeviceTestCase, self).setUp()
+ BDM = block_device.BlockDeviceDict
+
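+ # A representative mix of new-style BDMs: a swap disk, an ephemeral
+ # disk, a bootable volume, a snapshot-backed volume and a 'no_device'
+ # entry; the helper tests below rely on this composition.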
+ self.new_mapping = [
+ BDM({'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'volume_size': 1,
+ 'guest_format': 'swap',
+ 'boot_index': -1}),
+ BDM({'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'volume_size': 10,
+ 'delete_on_termination': True,
+ 'boot_index': -1}),
+ BDM({'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'boot_index': 0}),
+ BDM({'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1}),
+ BDM({'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'}),
+ ]
+
+ def test_properties(self):
+ root_device0 = '/dev/sda'
+ root_device1 = '/dev/sdb'
+ mappings = [{'virtual': 'root',
+ 'device': root_device0}]
+
+ properties0 = {'mappings': mappings}
+ properties1 = {'mappings': mappings,
+ 'root_device_name': root_device1}
+
+ self.assertIsNone(block_device.properties_root_device_name({}))
+ self.assertEqual(
+ block_device.properties_root_device_name(properties0),
+ root_device0)
+ self.assertEqual(
+ block_device.properties_root_device_name(properties1),
+ root_device1)
+
+ def test_ephemeral(self):
+ self.assertFalse(block_device.is_ephemeral('ephemeral'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral0'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral1'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral11'))
+ self.assertFalse(block_device.is_ephemeral('root'))
+ self.assertFalse(block_device.is_ephemeral('swap'))
+ self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
+
+ self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0)
+ self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1)
+ self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11)
+
+ self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
+ self.assertFalse(block_device.is_swap_or_ephemeral('root'))
+ self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
+
+ def test_mappings_prepend_dev(self):
+ mapping = [
+ {'virtual': 'ami', 'device': '/dev/sda'},
+ {'virtual': 'root', 'device': 'sda'},
+ {'virtual': 'ephemeral0', 'device': 'sdb'},
+ {'virtual': 'swap', 'device': 'sdc'},
+ {'virtual': 'ephemeral1', 'device': 'sdd'},
+ {'virtual': 'ephemeral2', 'device': 'sde'}]
+
+ expected = [
+ {'virtual': 'ami', 'device': '/dev/sda'},
+ {'virtual': 'root', 'device': 'sda'},
+ {'virtual': 'ephemeral0', 'device': '/dev/sdb'},
+ {'virtual': 'swap', 'device': '/dev/sdc'},
+ {'virtual': 'ephemeral1', 'device': '/dev/sdd'},
+ {'virtual': 'ephemeral2', 'device': '/dev/sde'}]
+
+ prepended = block_device.mappings_prepend_dev(mapping)
+ self.assertEqual(sorted(prepended, key=lambda m: m['virtual']),
+ sorted(expected, key=lambda m: m['virtual']))
+
+ def test_strip_dev(self):
+ self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda')
+ self.assertEqual(block_device.strip_dev('sda'), 'sda')
+
+ def test_strip_prefix(self):
+ self.assertEqual(block_device.strip_prefix('/dev/sda'), 'a')
+ self.assertEqual(block_device.strip_prefix('a'), 'a')
+ self.assertEqual(block_device.strip_prefix('xvda'), 'a')
+ self.assertEqual(block_device.strip_prefix('vda'), 'a')
+
+ def test_get_device_letter(self):
+ self.assertEqual(block_device.get_device_letter(''), '')
+ self.assertEqual(block_device.get_device_letter('/dev/sda1'), 'a')
+ self.assertEqual(block_device.get_device_letter('/dev/xvdb'), 'b')
+ self.assertEqual(block_device.get_device_letter('/dev/d'), 'd')
+ self.assertEqual(block_device.get_device_letter('a'), 'a')
+ self.assertEqual(block_device.get_device_letter('sdb2'), 'b')
+ self.assertEqual(block_device.get_device_letter('vdc'), 'c')
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1},
+ {'num': 2,
+ 'virtual_name': 'ephemeral2',
+ 'device_name': '/dev/sdd',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ in_mapping = block_device.volume_in_mapping(
+ device_name, block_device_info)
+ self.assertEqual(in_mapping, true_or_false)
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_get_root_bdm(self):
+ root_bdm = {'device_name': 'vda', 'boot_index': 0}
+ bdms = [root_bdm,
+ {'device_name': 'vdb', 'boot_index': 1},
+ {'device_name': 'vdc', 'boot_index': -1},
+ {'device_name': 'vdd'}]
+ self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
+ self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
+ self.assertIsNone(block_device.get_root_bdm([]))
+
+ def test_get_bdm_ephemeral_disk_size(self):
+ size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
+ self.assertEqual(10, size)
+
+ def test_get_bdm_swap_list(self):
+ swap_list = block_device.get_bdm_swap_list(self.new_mapping)
+ self.assertEqual(1, len(swap_list))
+ self.assertEqual(1, swap_list[0].get('id'))
+
+ def test_get_bdm_local_disk_num(self):
+ size = block_device.get_bdm_local_disk_num(self.new_mapping)
+ self.assertEqual(2, size)
+
+ def test_new_format_is_swap(self):
+ expected_results = [True, False, False, False, False]
+ for expected, bdm in zip(expected_results, self.new_mapping):
+ res = block_device.new_format_is_swap(bdm)
+ self.assertEqual(expected, res)
+
+ def test_new_format_is_ephemeral(self):
+ expected_results = [False, True, False, False, False]
+ for expected, bdm in zip(expected_results, self.new_mapping):
+ res = block_device.new_format_is_ephemeral(bdm)
+ self.assertEqual(expected, res)
+
+ def test_validate_device_name(self):
+ for value in [' ', 10, None, 'a' * 260]:
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.validate_device_name,
+ value)
+
+ def test_validate_and_default_volume_size(self):
+ bdm = {}
+ for value in [-1, 'a', 2.5]:
+ bdm['volume_size'] = value
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.validate_and_default_volume_size,
+ bdm)
+
+ def test_get_bdms_to_connect(self):
+ root_bdm = {'device_name': 'vda', 'boot_index': 0}
+ bdms = [root_bdm,
+ {'device_name': 'vdb', 'boot_index': 1},
+ {'device_name': 'vdc', 'boot_index': -1},
+ {'device_name': 'vde', 'boot_index': None},
+ {'device_name': 'vdd'}]
+ self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
+ exclude_root_mapping=True))
+ self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
+
+
+class TestBlockDeviceDict(test.NoDBTestCase):
+ def setUp(self):
+ super(TestBlockDeviceDict, self).setUp()
+
+ BDM = block_device.BlockDeviceDict
+
+ self.api_mapping = [
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'boot_index': -1},
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'boot_index': -1},
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-volume-id-1',
+ 'boot_index': 0},
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-snapshot-id-1',
+ 'boot_index': -1},
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'},
+ ]
+
+ self.new_mapping = [
+ BDM({'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'boot_index': -1}),
+ BDM({'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'boot_index': -1}),
+ BDM({'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'boot_index': 0}),
+ BDM({'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1}),
+ BDM({'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'}),
+ ]
+
+ self.legacy_mapping = [
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'delete_on_termination': True,
+ 'virtual_name': 'swap'},
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'delete_on_termination': True,
+ 'virtual_name': 'ephemeral0'},
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}"},
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2'},
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'},
+ ]
+
+ self.new_mapping_source_image = [
+ BDM({'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda3',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 'fake-volume-id-3',
+ 'boot_index': -1}),
+ BDM({'id': 7, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda4',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'image_id': 'fake-image-id-2',
+ 'boot_index': -1}),
+ ]
+
+ self.legacy_mapping_source_image = [
+ {'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda3',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 'fake-volume-id-3'},
+ ]
+
+ def test_init(self):
+ def fake_validate(obj, dct):
+ pass
+
+ self.stubs.Set(block_device.BlockDeviceDict, '_fields',
+ set(['field1', 'field2']))
+ self.stubs.Set(block_device.BlockDeviceDict, '_db_only_fields',
+ set(['db_field1', 'db_field2']))
+ self.stubs.Set(block_device.BlockDeviceDict, '_validate',
+ fake_validate)
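+ # With _fields/_db_only_fields stubbed to a tiny schema and _validate
+ # disabled, only the constructor's field filtering and defaulting is
+ # exercised here.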
+
+ # Make sure db-only fields are passed through only when they are
+ # present in the original dict
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
+ 'field2': 'bar',
+ 'db_field1': 'baz'})
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # Make sure all expected fields are defaulted
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIsNone(dev_dict['field2'])
+ self.assertNotIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # ...unless they are explicitly excluded via do_not_default
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
+ do_not_default=set(['field2']))
+ self.assertIn('field1', dev_dict)
+ self.assertNotIn('field2', dev_dict)
+ self.assertNotIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # Passing kwargs to constructor works
+ dev_dict = block_device.BlockDeviceDict(field1='foo')
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIsNone(dev_dict['field2'])
+ dev_dict = block_device.BlockDeviceDict(
+ {'field1': 'foo'}, field2='bar')
+ self.assertEqual('foo', dev_dict['field1'])
+ self.assertEqual('bar', dev_dict['field2'])
+
+ def test_init_prepend_dev_to_device_name(self):
+ bdm = {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': 'vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertEqual('/dev/vda', bdm_dict['device_name'])
+
+ bdm['device_name'] = '/dev/vdb'
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertEqual('/dev/vdb', bdm_dict['device_name'])
+
+ bdm['device_name'] = None
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertIsNone(bdm_dict['device_name'])
+
+ def test_validate(self):
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ {'bogus_field': 'lame_val'})
+
+ lame_bdm = dict(self.new_mapping[2])
+ del lame_bdm['source_type']
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_bdm)
+
+ lame_bdm['no_device'] = True
+ block_device.BlockDeviceDict(lame_bdm)
+
+ lame_dev_bdm = dict(self.new_mapping[2])
+ lame_dev_bdm['device_name'] = "not a valid name"
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_dev_bdm)
+
+ lame_dev_bdm['device_name'] = ""
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_dev_bdm)
+
+ cool_volume_size_bdm = dict(self.new_mapping[2])
+ cool_volume_size_bdm['volume_size'] = '42'
+ cool_volume_size_bdm = block_device.BlockDeviceDict(
+ cool_volume_size_bdm)
+ self.assertEqual(cool_volume_size_bdm['volume_size'], 42)
+
+ lame_volume_size_bdm = dict(self.new_mapping[2])
+ lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_volume_size_bdm)
+
+ truthy_bdm = dict(self.new_mapping[2])
+ truthy_bdm['delete_on_termination'] = '1'
+ truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
+ self.assertEqual(truthy_bdm['delete_on_termination'], True)
+
+ verbose_bdm = dict(self.new_mapping[2])
+ verbose_bdm['boot_index'] = 'first'
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ verbose_bdm)
+
+ def test_from_legacy(self):
+ for legacy, new in zip(self.legacy_mapping, self.new_mapping):
+ self.assertThat(
+ block_device.BlockDeviceDict.from_legacy(legacy),
+ matchers.IsSubDictOf(new))
+
+ def test_from_legacy_mapping(self):
+ def _get_image_bdms(bdms):
+ return [bdm for bdm in bdms if bdm['source_type'] == 'image']
+
+ def _get_bootable_bdms(bdms):
+ return [bdm for bdm in bdms if bdm['boot_index'] >= 0]
+
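+ # Without an image ref, no image BDM should be synthesized from the
+ # legacy mappings.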
+ new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
+ self.assertEqual(len(_get_image_bdms(new_no_img)), 0)
+
+ for new, expected in zip(new_no_img, self.new_mapping):
+ self.assertThat(new, matchers.IsSubDictOf(expected))
+
+ new_with_img = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref')
+ image_bdms = _get_image_bdms(new_with_img)
+ boot_bdms = _get_bootable_bdms(new_with_img)
+ self.assertEqual(len(image_bdms), 1)
+ self.assertEqual(len(boot_bdms), 1)
+ self.assertEqual(image_bdms[0]['boot_index'], 0)
+ self.assertEqual(boot_bdms[0]['source_type'], 'image')
+
+ new_with_img_and_root = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref', 'sda1')
+ image_bdms = _get_image_bdms(new_with_img_and_root)
+ boot_bdms = _get_bootable_bdms(new_with_img_and_root)
+ self.assertEqual(len(image_bdms), 0)
+ self.assertEqual(len(boot_bdms), 1)
+ self.assertEqual(boot_bdms[0]['boot_index'], 0)
+ self.assertEqual(boot_bdms[0]['source_type'], 'volume')
+
+ new_no_root = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
+ self.assertEqual(len(_get_image_bdms(new_no_root)), 0)
+ self.assertEqual(len(_get_bootable_bdms(new_no_root)), 0)
+
+ def test_from_api(self):
+ for api, new in zip(self.api_mapping, self.new_mapping):
+ new['connection_info'] = None
+ if new['snapshot_id']:
+ new['volume_id'] = None
+ self.assertThat(
+ block_device.BlockDeviceDict.from_api(api),
+ matchers.IsSubDictOf(new))
+
+ def test_from_api_invalid_blank_id(self):
+ api_dict = {'id': 1,
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-volume-id-1',
+ 'delete_on_termination': True,
+ 'boot_index': -1}
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict.from_api, api_dict)
+
+ def test_legacy(self):
+ for legacy, new in zip(self.legacy_mapping, self.new_mapping):
+ self.assertThat(
+ legacy,
+ matchers.IsSubDictOf(new.legacy()))
+
+ def test_legacy_mapping(self):
+ got_legacy = block_device.legacy_mapping(self.new_mapping)
+
+ for legacy, expected in zip(got_legacy, self.legacy_mapping):
+ self.assertThat(expected, matchers.IsSubDictOf(legacy))
+
+ def test_legacy_source_image(self):
+ for legacy, new in zip(self.legacy_mapping_source_image,
+ self.new_mapping_source_image):
+ if new['destination_type'] == 'volume':
+ self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
+ else:
+ self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
+
+ def test_legacy_mapping_source_image(self):
+ got_legacy = block_device.legacy_mapping(self.new_mapping)
+
+ for legacy, expected in zip(got_legacy, self.legacy_mapping):
+ self.assertThat(expected, matchers.IsSubDictOf(legacy))
+
+ def test_legacy_mapping_from_object_list(self):
+ bdm1 = objects.BlockDeviceMapping()
+ bdm1 = objects.BlockDeviceMapping._from_db_object(
+ None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
+ self.new_mapping[0]))
+ bdm2 = objects.BlockDeviceMapping()
+ bdm2 = objects.BlockDeviceMapping._from_db_object(
+ None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
+ self.new_mapping[1]))
+ bdmlist = objects.BlockDeviceMappingList()
+ bdmlist.objects = [bdm1, bdm2]
+ block_device.legacy_mapping(bdmlist)
+
+ def test_image_mapping(self):
+ removed_fields = ['id', 'instance_uuid', 'connection_info',
+ 'device_name', 'created_at', 'updated_at',
+ 'deleted_at', 'deleted']
+ for bdm in self.new_mapping:
+ mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ bdm).get_image_mapping()
+ for fld in removed_fields:
+ self.assertNotIn(fld, mapping_bdm)
+
+ def _test_snapshot_from_bdm(self, template):
+ snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
+ self.assertEqual(snapshot['snapshot_id'], 'new-snapshot-id')
+ self.assertEqual(snapshot['source_type'], 'snapshot')
+ self.assertEqual(snapshot['destination_type'], 'volume')
+ for key in ['disk_bus', 'device_type', 'boot_index']:
+ self.assertEqual(snapshot[key], template[key])
+
+ def test_snapshot_from_bdm(self):
+ for bdm in self.new_mapping:
+ self._test_snapshot_from_bdm(bdm)
+
+ def test_snapshot_from_object(self):
+ for bdm in self.new_mapping[:-1]:
+ obj = objects.BlockDeviceMapping()
+ obj = objects.BlockDeviceMapping._from_db_object(
+ None, obj, fake_block_device.FakeDbBlockDeviceDict(
+ bdm))
+ self._test_snapshot_from_bdm(obj)
diff --git a/nova/tests/unit/test_cinder.py b/nova/tests/unit/test_cinder.py
new file mode 100644
index 0000000000..913b4e4de7
--- /dev/null
+++ b/nova/tests/unit/test_cinder.py
@@ -0,0 +1,405 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import exceptions as cinder_exception
+from cinderclient.v1 import client as cinder_client_v1
+from cinderclient.v2 import client as cinder_client_v2
+import mock
+import six.moves.urllib.parse as urlparse
+
+from nova import context
+from nova import exception
+from nova import test
+from nova.volume import cinder
+
+
+def _stub_volume(**kwargs):
+ volume = {
+ 'display_name': None,
+ 'display_description': None,
+ "attachments": [],
+ "availability_zone": "cinder",
+ "created_at": "2012-09-10T00:00:00.000000",
+ "id": '00000000-0000-0000-0000-000000000000',
+ "metadata": {},
+ "size": 1,
+ "snapshot_id": None,
+ "status": "available",
+ "volume_type": "None",
+ "bootable": "true"
+ }
+ volume.update(kwargs)
+ return volume
+
+
+def _stub_volume_v2(**kwargs):
+ volume_v2 = {
+ 'name': None,
+ 'description': None,
+ "attachments": [],
+ "availability_zone": "cinderv2",
+ "created_at": "2013-08-10T00:00:00.000000",
+ "id": '00000000-0000-0000-0000-000000000000',
+ "metadata": {},
+ "size": 1,
+ "snapshot_id": None,
+ "status": "available",
+ "volume_type": "None",
+ "bootable": "true"
+ }
+ volume_v2.update(kwargs)
+ return volume_v2
+
+
+_image_metadata = {
+ 'kernel_id': 'fake',
+ 'ramdisk_id': 'fake'
+}
+
+
+class FakeHTTPClient(cinder.cinder_client.HTTPClient):
+
+ def _cs_request(self, url, method, **kwargs):
+ # Check that certain things are called correctly
+ if method in ['GET', 'DELETE']:
+ assert 'body' not in kwargs
+ elif method == 'PUT':
+ assert 'body' in kwargs
+
+ # Call the method
+ args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
+ kwargs.update(args)
+ munged_url = url.rsplit('?', 1)[0]
+ munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
+ munged_url = munged_url.replace('-', '_')
+
+ callback = "%s_%s" % (method.lower(), munged_url)
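+ # e.g. 'GET /volumes/1234' is dispatched to self.get_volumes_1234 below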
+
+ if not hasattr(self, callback):
+ raise AssertionError('Called unknown API method: %s %s, '
+ 'expected fakes method name: %s' %
+ (method, url, callback))
+
+ # Note the call
+ self.callstack.append((method, url, kwargs.get('body', None)))
+
+ status, body = getattr(self, callback)(**kwargs)
+ if hasattr(status, 'items'):
+ return status, body
+ else:
+ return {"status": status}, body
+
+ def get_volumes_1234(self, **kw):
+ volume = {'volume': _stub_volume(id='1234')}
+ return (200, volume)
+
+ def get_volumes_nonexisting(self, **kw):
+ raise cinder_exception.NotFound(code=404, message='Resource not found')
+
+ def get_volumes_5678(self, **kw):
+ """Volume with image metadata."""
+ volume = {'volume': _stub_volume(id='5678',
+ volume_image_metadata=_image_metadata)
+ }
+ return (200, volume)
+
+
+class FakeHTTPClientV2(cinder.cinder_client.HTTPClient):
+
+ def _cs_request(self, url, method, **kwargs):
+ # Check that certain things are called correctly
+ if method in ['GET', 'DELETE']:
+ assert 'body' not in kwargs
+ elif method == 'PUT':
+ assert 'body' in kwargs
+
+ # Call the method
+ args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
+ kwargs.update(args)
+ munged_url = url.rsplit('?', 1)[0]
+ munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
+ munged_url = munged_url.replace('-', '_')
+
+ callback = "%s_%s" % (method.lower(), munged_url)
+
+ if not hasattr(self, callback):
+ raise AssertionError('Called unknown API method: %s %s, '
+ 'expected fakes method name: %s' %
+ (method, url, callback))
+
+ # Note the call
+ self.callstack.append((method, url, kwargs.get('body', None)))
+
+ status, body = getattr(self, callback)(**kwargs)
+ if hasattr(status, 'items'):
+ return status, body
+ else:
+ return {"status": status}, body
+
+ def get_volumes_1234(self, **kw):
+ volume = {'volume': _stub_volume_v2(id='1234')}
+ return (200, volume)
+
+ def get_volumes_nonexisting(self, **kw):
+ raise cinder_exception.NotFound(code=404, message='Resource not found')
+
+ def get_volumes_5678(self, **kw):
+ """Volume with image metadata."""
+ volume = {'volume': _stub_volume_v2(
+ id='5678',
+ volume_image_metadata=_image_metadata)
+ }
+ return (200, volume)
+
+
+class FakeCinderClient(cinder_client_v1.Client):
+
+ def __init__(self, username, password, project_id=None, auth_url=None,
+ insecure=False, retries=None, cacert=None, timeout=None):
+ super(FakeCinderClient, self).__init__(username, password,
+ project_id=project_id,
+ auth_url=auth_url,
+ insecure=insecure,
+ retries=retries,
+ cacert=cacert,
+ timeout=timeout)
+ self.client = FakeHTTPClient(username, password, project_id, auth_url,
+ insecure=insecure, retries=retries,
+ cacert=cacert, timeout=timeout)
+ # keep a ref to the client's callstack for factory's assert_called
+ self.callstack = self.client.callstack = []
+
+
+class FakeCinderClientV2(cinder_client_v2.Client):
+
+ def __init__(self, username, password, project_id=None, auth_url=None,
+ insecure=False, retries=None, cacert=None, timeout=None):
+ super(FakeCinderClientV2, self).__init__(username, password,
+ project_id=project_id,
+ auth_url=auth_url,
+ insecure=insecure,
+ retries=retries,
+ cacert=cacert,
+ timeout=timeout)
+ self.client = FakeHTTPClientV2(username, password, project_id,
+ auth_url, insecure=insecure,
+ retries=retries, cacert=cacert,
+ timeout=timeout)
+ # keep a ref to the client's callstack for factory's assert_called
+ self.callstack = self.client.callstack = []
+
+
+class FakeClientFactory(object):
+ """Keep a ref to the FakeClient since volume.api.cinder throws it away."""
+
+ def __call__(self, *args, **kwargs):
+ self.client = FakeCinderClient(*args, **kwargs)
+ return self.client
+
+ def assert_called(self, method, url, body=None, pos=-1):
+ expected = (method, url)
+ called = self.client.callstack[pos][0:2]
+
+ assert self.client.callstack, ("Expected %s %s but no calls "
+ "were made." % expected)
+
+ assert expected == called, 'Expected %s %s; got %s %s' % (expected +
+ called)
+
+ if body is not None:
+ assert self.client.callstack[pos][2] == body
+
+
+class FakeClientV2Factory(object):
+ """Keep a ref to the FakeClient since volume.api.cinder throws it away."""
+
+ def __call__(self, *args, **kwargs):
+ self.client = FakeCinderClientV2(*args, **kwargs)
+ return self.client
+
+ def assert_called(self, method, url, body=None, pos=-1):
+ expected = (method, url)
+ called = self.client.callstack[pos][0:2]
+
+ assert self.client.callstack, ("Expected %s %s but no calls "
+ "were made." % expected)
+
+ assert expected == called, 'Expected %s %s; got %s %s' % (expected +
+ called)
+
+ if body is not None:
+ assert self.client.callstack[pos][2] == body
+
+
+fake_client_factory = FakeClientFactory()
+fake_client_v2_factory = FakeClientV2Factory()
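+# Module-level factory instances: the mock.patch.object decorators below
+# swap them in for the real cinderclient Client classes, and the tests
+# inspect the callstack they record via assert_called().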
+
+
+@mock.patch.object(cinder_client_v1, 'Client', fake_client_factory)
+class CinderTestCase(test.NoDBTestCase):
+ """Test case for cinder volume v1 api."""
+
+ def setUp(self):
+ super(CinderTestCase, self).setUp()
+ catalog = [{
+ "type": "volume",
+ "name": "cinder",
+ "endpoints": [{"publicURL": "http://localhost:8776/v1/project_id"}]
+ }]
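+ # The catalog_info override below ('volume:cinder:publicURL') is meant
+ # to match this catalog entry's type, name and endpoint kind so the
+ # client can resolve its endpoint from the service catalog.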
+ cinder.CONF.set_override('catalog_info',
+ 'volume:cinder:publicURL', group='cinder')
+ self.context = context.RequestContext('username', 'project_id',
+ service_catalog=catalog)
+ cinder.cinderclient(self.context)
+
+ self.api = cinder.API()
+
+ def assert_called(self, *args, **kwargs):
+ fake_client_factory.assert_called(*args, **kwargs)
+
+ def test_context_with_catalog(self):
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ fake_client_factory.client.client.management_url,
+ 'http://localhost:8776/v1/project_id')
+
+ def test_cinder_endpoint_template(self):
+ self.flags(
+ endpoint_template='http://other_host:8776/v1/%(project_id)s',
+ group='cinder'
+ )
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ fake_client_factory.client.client.management_url,
+ 'http://other_host:8776/v1/project_id')
+
+ def test_get_non_existing_volume(self):
+ self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
+ 'nonexisting')
+
+ def test_volume_with_image_metadata(self):
+ volume = self.api.get(self.context, '5678')
+ self.assert_called('GET', '/volumes/5678')
+ self.assertIn('volume_image_metadata', volume)
+ self.assertEqual(volume['volume_image_metadata'], _image_metadata)
+
+ def test_cinder_api_insecure(self):
+ # The True/False negation is awkward: the caller passes us
+ # insecure=True and we check that verify_cert ends up False.
+ self.flags(api_insecure=True, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ fake_client_factory.client.client.verify_cert, False)
+
+ def test_cinder_api_cacert_file(self):
+ cacert = "/etc/ssl/certs/ca-certificates.crt"
+ self.flags(ca_certificates_file=cacert, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ fake_client_factory.client.client.verify_cert, cacert)
+
+ def test_cinder_http_retries(self):
+ retries = 42
+ self.flags(http_retries=retries, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ fake_client_factory.client.client.retries, retries)
+
+
+@mock.patch.object(cinder_client_v2, 'Client', fake_client_v2_factory)
+class CinderV2TestCase(test.NoDBTestCase):
+ """Test case for cinder volume v2 api."""
+
+ def setUp(self):
+ super(CinderV2TestCase, self).setUp()
+ catalog = [{
+ "type": "volumev2",
+ "name": "cinderv2",
+ "endpoints": [{"publicURL": "http://localhost:8776/v2/project_id"}]
+ }]
+ self.context = context.RequestContext('username', 'project_id',
+ service_catalog=catalog)
+
+ cinder.cinderclient(self.context)
+ self.api = cinder.API()
+
+ def tearDown(self):
+ cinder.CONF.reset()
+ super(CinderV2TestCase, self).tearDown()
+
+ def assert_called(self, *args, **kwargs):
+ fake_client_v2_factory.assert_called(*args, **kwargs)
+
+ def test_context_with_catalog(self):
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ 'http://localhost:8776/v2/project_id',
+ fake_client_v2_factory.client.client.management_url)
+
+ def test_cinder_endpoint_template(self):
+ self.flags(
+ endpoint_template='http://other_host:8776/v2/%(project_id)s',
+ group='cinder'
+ )
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(
+ 'http://other_host:8776/v2/project_id',
+ fake_client_v2_factory.client.client.management_url)
+
+ def test_get_non_existing_volume(self):
+ self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
+ 'nonexisting')
+
+ def test_volume_with_image_metadata(self):
+ volume = self.api.get(self.context, '5678')
+ self.assert_called('GET', '/volumes/5678')
+ self.assertIn('volume_image_metadata', volume)
+ self.assertEqual(_image_metadata, volume['volume_image_metadata'])
+
+ def test_cinder_api_insecure(self):
+ # The True/False negation is awkward: the caller passes us
+ # insecure=True and we check that verify_cert ends up False.
+ self.flags(api_insecure=True, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertFalse(fake_client_v2_factory.client.client.verify_cert)
+
+ def test_cinder_api_cacert_file(self):
+ cacert = "/etc/ssl/certs/ca-certificates.crt"
+ self.flags(ca_certificates_file=cacert, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(cacert,
+ fake_client_v2_factory.client.client.verify_cert)
+
+ def test_cinder_http_retries(self):
+ retries = 42
+ self.flags(http_retries=retries, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEqual(retries, fake_client_v2_factory.client.client.retries)
+
+ def test_cinder_http_timeout(self):
+ timeout = 123
+ self.flags(http_timeout=timeout, group='cinder')
+ self.api.get(self.context, '1234')
+ self.assertEqual(timeout,
+ fake_client_v2_factory.client.client.timeout)
diff --git a/nova/tests/unit/test_configdrive2.py b/nova/tests/unit/test_configdrive2.py
new file mode 100644
index 0000000000..f6bcaea99d
--- /dev/null
+++ b/nova/tests/unit/test_configdrive2.py
@@ -0,0 +1,104 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.openstack.common import fileutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+from nova.virt import configdrive
+
+CONF = cfg.CONF
+
+
+class FakeInstanceMD(object):
+ def metadata_for_config_drive(self):
+ yield ('this/is/a/path/hello', 'This is some content')
+
+
+class ConfigDriveTestCase(test.NoDBTestCase):
+
+ def test_create_configdrive_iso(self):
+ CONF.set_override('config_drive_format', 'iso9660')
+ imagefile = None
+
+ try:
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
+ '-V', 'config-2', mox.IgnoreArg(), attempts=1,
+ run_as_root=False).AndReturn(None)
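+ # Only the genisoimage invocation is verified by mox here; the
+ # generated image contents are not inspected.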
+
+ self.mox.ReplayAll()
+
+ with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
+ (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
+ os.close(fd)
+ c.make_drive(imagefile)
+
+ finally:
+ if imagefile:
+ fileutils.delete_if_exists(imagefile)
+
+ def test_create_configdrive_vfat(self):
+ CONF.set_override('config_drive_format', 'vfat')
+ imagefile = None
+ try:
+ self.mox.StubOutWithMock(utils, 'mkfs')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(utils, 'trycmd')
+
+ utils.mkfs('vfat', mox.IgnoreArg(),
+ label='config-2').AndReturn(None)
+ utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ run_as_root=True).AndReturn((None, None))
+ utils.execute('umount', mox.IgnoreArg(),
+ run_as_root=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
+ (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
+ os.close(fd)
+ c.make_drive(imagefile)
+
+ # NOTE(mikal): we can't check for VFAT output here because the
+ # filesystem creation has been mocked out, as it requires root
+ # permissions
+
+ finally:
+ if imagefile:
+ fileutils.delete_if_exists(imagefile)
+
+ def test_config_drive_required_by_image_property(self):
+ inst = fake_instance.fake_instance_obj(context.get_admin_context())
+ inst.config_drive = ''
+ inst.system_metadata = {
+ utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
+ self.assertTrue(configdrive.required_by(inst))
+
+ inst.system_metadata = {
+ utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
+ self.assertFalse(configdrive.required_by(inst))
diff --git a/nova/tests/unit/test_context.py b/nova/tests/unit/test_context.py
new file mode 100644
index 0000000000..773f9e77f5
--- /dev/null
+++ b/nova/tests/unit/test_context.py
@@ -0,0 +1,121 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.NoDBTestCase):
+
+ def test_request_context_sets_is_admin(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+ self.assertEqual(ctxt.is_admin, True)
+
+ def test_request_context_sets_is_admin_by_role(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['administrator'])
+ self.assertEqual(ctxt.is_admin, True)
+
+ def test_request_context_sets_is_admin_upcase(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['Admin', 'weasel'])
+ self.assertEqual(ctxt.is_admin, True)
+
+ def test_request_context_read_deleted(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ read_deleted='yes')
+ self.assertEqual(ctxt.read_deleted, 'yes')
+
+ ctxt.read_deleted = 'no'
+ self.assertEqual(ctxt.read_deleted, 'no')
+
+ def test_request_context_read_deleted_invalid(self):
+ self.assertRaises(ValueError,
+ context.RequestContext,
+ '111',
+ '222',
+ read_deleted=True)
+
+ ctxt = context.RequestContext('111', '222')
+ self.assertRaises(ValueError,
+ setattr,
+ ctxt,
+ 'read_deleted',
+ True)
+
+ def test_extra_args_to_context_get_logged(self):
+ info = {}
+
+ def fake_warn(log_msg):
+ info['log_msg'] = log_msg
+
+ self.stubs.Set(context.LOG, 'warn', fake_warn)
+
+ c = context.RequestContext('user', 'project',
+ extra_arg1='meow', extra_arg2='wuff')
+ self.assertTrue(c)
+ self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
+ self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
+
+ def test_service_catalog_default(self):
+ ctxt = context.RequestContext('111', '222')
+ self.assertEqual(ctxt.service_catalog, [])
+
+ ctxt = context.RequestContext('111', '222',
+ service_catalog=[])
+ self.assertEqual(ctxt.service_catalog, [])
+
+ ctxt = context.RequestContext('111', '222',
+ service_catalog=None)
+ self.assertEqual(ctxt.service_catalog, [])
+
+ def test_service_catalog_cinder_only(self):
+ service_catalog = [
+ {u'type': u'compute', u'name': u'nova'},
+ {u'type': u's3', u'name': u's3'},
+ {u'type': u'image', u'name': u'glance'},
+ {u'type': u'volume', u'name': u'cinder'},
+ {u'type': u'ec2', u'name': u'ec2'},
+ {u'type': u'object-store', u'name': u'swift'},
+ {u'type': u'identity', u'name': u'keystone'},
+ {u'type': None, u'name': u'S_withouttype'},
+ {u'type': u'vo', u'name': u'S_partofvolume'}]
+
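+ # Only the 'volume' entries are expected to survive the filtering.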
+ volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
+ ctxt = context.RequestContext('111', '222',
+ service_catalog=service_catalog)
+ self.assertEqual(ctxt.service_catalog, volume_catalog)
+
+ def test_to_dict_from_dict_no_log(self):
+ warns = []
+
+ def stub_warn(msg, *a, **kw):
+ if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
+ a = a[0]
+ warns.append(str(msg) % a)
+
+ self.stubs.Set(context.LOG, 'warn', stub_warn)
+
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+
+ ctxt = context.RequestContext.from_dict(ctxt.to_dict())
+
+ self.assertEqual(len(warns), 0, warns)
diff --git a/nova/tests/unit/test_crypto.py b/nova/tests/unit/test_crypto.py
new file mode 100644
index 0000000000..49634626a3
--- /dev/null
+++ b/nova/tests/unit/test_crypto.py
@@ -0,0 +1,256 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for Crypto module.
+"""
+
+import os
+
+import mock
+import mox
+from oslo.concurrency import processutils
+
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import test
+from nova import utils
+
+
+class X509Test(test.TestCase):
+ def test_can_generate_x509(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir)
+ crypto.ensure_ca_filesystem()
+ _key, cert_str = crypto.generate_x509_cert('fake', 'fake')
+
+ project_cert = crypto.fetch_ca(project_id='fake')
+
+ signed_cert_file = os.path.join(tmpdir, "signed")
+ with open(signed_cert_file, 'w') as keyfile:
+ keyfile.write(cert_str)
+
+ project_cert_file = os.path.join(tmpdir, "project")
+ with open(project_cert_file, 'w') as keyfile:
+ keyfile.write(project_cert)
+
+ enc, err = utils.execute('openssl', 'verify', '-CAfile',
+ project_cert_file, '-verbose', signed_cert_file)
+ self.assertFalse(err)
+
+ def test_encrypt_decrypt_x509(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir)
+ project_id = "fake"
+ crypto.ensure_ca_filesystem()
+ cert = crypto.fetch_ca(project_id)
+ public_key = os.path.join(tmpdir, "public.pem")
+ with open(public_key, 'w') as keyfile:
+ keyfile.write(cert)
+ text = "some @#!%^* test text"
+ enc, _err = utils.execute('openssl',
+ 'rsautl',
+ '-certin',
+ '-encrypt',
+ '-inkey', '%s' % public_key,
+ process_input=text)
+ dec = crypto.decrypt_text(project_id, enc)
+ self.assertEqual(text, dec)
+
+
+class RevokeCertsTest(test.TestCase):
+
+ def test_revoke_certs_by_user_and_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user_and_project(context,
+ user_id,
+ project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
+ mock_certificate_get_all_by_user_and_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, file_name)
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user_and_project(user_id, project_id)
+
+ def test_revoke_certs_by_user(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user(context, user_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user',
+ mock_certificate_get_all_by_user)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user(user_id)
+
+ def test_revoke_certs_by_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_project(context, project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_project',
+ mock_certificate_get_all_by_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_project(project_id)
+
+ @mock.patch.object(utils, 'execute',
+ side_effect=processutils.ProcessExecutionError)
+ @mock.patch.object(os, 'chdir', return_value=None)
+ def test_revoke_cert_process_execution_error(self, *args, **kargs):
+ self.assertRaises(exception.RevokeCertFailure, crypto.revoke_cert,
+ 2, 'test_file')
+
+ @mock.patch.object(os, 'chdir', mock.Mock(side_effect=OSError))
+ def test_revoke_cert_project_not_found_chdir_fails(self, *args, **kargs):
+ self.assertRaises(exception.ProjectNotFound, crypto.revoke_cert,
+ 2, 'test_file')
+
+
+class CertExceptionTests(test.TestCase):
+ def test_fetch_ca_file_not_found(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir)
+ self.flags(use_project_ca=True)
+
+ self.assertRaises(exception.CryptoCAFileNotFound, crypto.fetch_ca,
+ project_id='fake')
+
+ def test_fetch_crl_file_not_found(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir)
+ self.flags(use_project_ca=True)
+
+ self.assertRaises(exception.CryptoCRLFileNotFound,
+ crypto.fetch_crl, project_id='fake')
+
+
+class EncryptionTests(test.TestCase):
+ pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv"
+ "zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4"
+ "fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi"
+ "goi+MSDPD7+5yRJ9z4mH9h7MCY6Ejv8KTcNYmVHvRhsFUcVhWcIISlNWUGiG7rf"
+ "oki060F5myQN3AXcL8gHG5/Qb1RVkQFUKZ5geQ39/wSyYA1Q65QTba/5G2QNbl2"
+ "0eAIBTyKZhN6g88ak+yARa6BLLDkrlP7L4WctHQMLsuXHohQsUO9AcOlVMARgrg"
+ "uF test@test")
+ prikey = """-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAwK7YK3wbv4NqNvKPh9p4P3K784BEvdYvzTTzxUzrrV3payfU
+Ij0z05vgfMrKS7F4n5m7Jl87TTpQUIEF7OrrMW+H5ySjVGfOFZKeXrG6ivy+3ska
+sD76qO27SgTKADFV/5FMqIzKcW+0rIejOmPHKYoKIvjEgzw+/uckSfc+Jh/YezAm
+OhI7/Ck3DWJlR70YbBVHFYVnCCEpTVlBohu636JItOtBeZskDdwF3C/IBxuf0G9U
+VZEBVCmeYHkN/f8EsmANUOuUE22v+RtkDW5dtHgCAU8imYTeoPPGpPsgEWugSyw5
+K5T+y+FnLR0DC7Llx6IULFDvQHDpVTAEYK4LhQIDAQABAoIBAF9ibrrgHnBpItx+
+qVUMbriiGK8LUXxUmqdQTljeolDZi6KzPc2RVKWtpazBSvG7skX3+XCediHd+0JP
+DNri1HlNiA6B0aUIGjoNsf6YpwsE4YwyK9cR5k5YGX4j7se3pKX2jOdngxQyw1Mh
+dkmCeWZz4l67nbSFz32qeQlwrsB56THJjgHB7elDoGCXTX/9VJyjFlCbfxVCsIng
+inrNgT0uMSYMNpAjTNOjguJt/DtXpwzei5eVpsERe0TRRVH23ycS0fuq/ancYwI/
+MDr9KSB8r+OVGeVGj3popCxECxYLBxhqS1dAQyJjhQXKwajJdHFzidjXO09hLBBz
+FiutpYUCgYEA6OFikTrPlCMGMJjSj+R9woDAOPfvCDbVZWfNo8iupiECvei88W28
+RYFnvUQRjSC0pHe//mfUSmiEaE+SjkNCdnNR+vsq9q+htfrADm84jl1mfeWatg/g
+zuGz2hAcZnux3kQMI7ufOwZNNpM2bf5B4yKamvG8tZRRxSkkAL1NV48CgYEA08/Z
+Ty9g9XPKoLnUWStDh1zwG+c0q14l2giegxzaUAG5DOgOXbXcw0VQ++uOWD5ARELG
+g9wZcbBsXxJrRpUqx+GAlv2Y1bkgiPQS1JIyhsWEUtwfAC/G+uZhCX53aI3Pbsjh
+QmkPCSp5DuOuW2PybMaw+wVe+CaI/gwAWMYDAasCgYEA4Fzkvc7PVoU33XIeywr0
+LoQkrb4QyPUrOvt7H6SkvuFm5thn0KJMlRpLfAksb69m2l2U1+HooZd4mZawN+eN
+DNmlzgxWJDypq83dYwq8jkxmBj1DhMxfZnIE+L403nelseIVYAfPLOqxUTcbZXVk
+vRQFp+nmSXqQHUe5rAy1ivkCgYEAqLu7cclchCxqDv/6mc5NTVhMLu5QlvO5U6fq
+HqitgW7d69oxF5X499YQXZ+ZFdMBf19ypTiBTIAu1M3nh6LtIa4SsjXzus5vjKpj
+FdQhTBus/hU83Pkymk1MoDOPDEtsI+UDDdSDldmv9pyKGWPVi7H86vusXCLWnwsQ
+e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap
+6S1ziw+XWfdQ83VIUOCL5DrwmcYzLIogS0agmnx/monfDx0Nl9+OZRxy6+AI9vkK
+86A1+DXdo+IgX3grFK1l1gPhAZPRWJZ+anrEkyR4iLq6ZoPZ3BQn97U=
+-----END RSA PRIVATE KEY-----"""
+ text = "Some text! %$*"
+
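+ # Decrypt with openssl and the matching RSA private key to verify the
+ # ssh_encrypt_text round trip end to end.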
+ def _ssh_decrypt_text(self, ssh_private_key, text):
+ with utils.tempdir() as tmpdir:
+ sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key'))
+ with open(sshkey, 'w') as f:
+ f.write(ssh_private_key)
+ try:
+ dec, _err = utils.execute('openssl',
+ 'rsautl',
+ '-decrypt',
+ '-inkey', sshkey,
+ process_input=text)
+ return dec
+ except processutils.ProcessExecutionError as exc:
+ raise exception.DecryptionFailure(reason=exc.stderr)
+
+ def test_ssh_encrypt_decrypt_text(self):
+ enc = crypto.ssh_encrypt_text(self.pubkey, self.text)
+ self.assertNotEqual(enc, self.text)
+ result = self._ssh_decrypt_text(self.prikey, enc)
+ self.assertEqual(result, self.text)
+
+ def test_ssh_encrypt_failure(self):
+ self.assertRaises(exception.EncryptionFailure,
+ crypto.ssh_encrypt_text, '', self.text)
+
+
+class ConversionTests(test.TestCase):
+ k1 = ("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4CqmrxfU7x4sJrubpMNxeglul+d"
+ "ByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6LH/UR/c"
+ "27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrCTRVeo"
+ "9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbCivfF"
+ "LQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQMSF"
+ "r35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4lQ"
+ "== test@test")
+
+ k2 = ("-----BEGIN PUBLIC KEY-----\n"
+ "MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA4CqmrxfU7x4sJrubpMNx\n"
+ "eglul+dByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6L\n"
+ "H/UR/c27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrC\n"
+ "TRVeo9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbC\n"
+ "ivfFLQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQ\n"
+ "MSFr35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4\n"
+ "lQIBIw==\n"
+ "-----END PUBLIC KEY-----\n")
+
+ def test_convert_keys(self):
+ result = crypto.convert_from_sshrsa_to_pkcs8(self.k1)
+ self.assertEqual(result, self.k2)
+
+ def test_convert_failure(self):
+ self.assertRaises(exception.EncryptionFailure,
+ crypto.convert_from_sshrsa_to_pkcs8, '')
diff --git a/nova/tests/unit/test_exception.py b/nova/tests/unit/test_exception.py
new file mode 100644
index 0000000000..6b1617047c
--- /dev/null
+++ b/nova/tests/unit/test_exception.py
@@ -0,0 +1,179 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import inspect
+
+import six
+
+from nova import context
+from nova import exception
+from nova import test
+
+
+class FakeNotifier(object):
+ """Acts like messaging.Notifier."""
+
+ def __init__(self):
+ self.provided_context = None
+ self.provided_event = None
+ self.provided_payload = None
+
+ def error(self, context, event, payload):
+ self.provided_context = context
+ self.provided_event = event
+ self.provided_payload = payload
+
+
+def good_function(self, context):
+ return 99
+
+
+def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None):
+ raise test.TestingException()
+
+
+class WrapExceptionTestCase(test.NoDBTestCase):
+ def test_wrap_exception_good_return(self):
+ wrapped = exception.wrap_exception('foo')
+ self.assertEqual(99, wrapped(good_function)(1, 2))
+
+ def test_wrap_exception_with_notifier(self):
+ notifier = FakeNotifier()
+ wrapped = exception.wrap_exception(notifier)
+ ctxt = context.get_admin_context()
+ self.assertRaises(test.TestingException,
+ wrapped(bad_function_exception), 1, ctxt, 3, zoo=3)
+ self.assertEqual(notifier.provided_event, "bad_function_exception")
+ self.assertEqual(notifier.provided_context, ctxt)
+ self.assertEqual(notifier.provided_payload['args']['extra'], 3)
+ for key in ['exception', 'args']:
+ self.assertIn(key, notifier.provided_payload.keys())
+
+
+class NovaExceptionTestCase(test.NoDBTestCase):
+ def test_default_error_msg(self):
+ class FakeNovaException(exception.NovaException):
+ msg_fmt = "default message"
+
+ exc = FakeNovaException()
+ self.assertEqual(six.text_type(exc), 'default message')
+
+ def test_error_msg(self):
+ self.assertEqual(six.text_type(exception.NovaException('test')),
+ 'test')
+
+ def test_default_error_msg_with_kwargs(self):
+ class FakeNovaException(exception.NovaException):
+ msg_fmt = "default message: %(code)s"
+
+ exc = FakeNovaException(code=500)
+ self.assertEqual(six.text_type(exc), 'default message: 500')
+ self.assertEqual(exc.message, 'default message: 500')
+
+ def test_error_msg_exception_with_kwargs(self):
+ class FakeNovaException(exception.NovaException):
+ msg_fmt = "default message: %(misspelled_code)s"
+
+ exc = FakeNovaException(code=500, misspelled_code='blah')
+ self.assertEqual(six.text_type(exc), 'default message: blah')
+ self.assertEqual(exc.message, 'default message: blah')
+
+ def test_default_error_code(self):
+ class FakeNovaException(exception.NovaException):
+ code = 404
+
+ exc = FakeNovaException()
+ self.assertEqual(exc.kwargs['code'], 404)
+
+ def test_error_code_from_kwarg(self):
+ class FakeNovaException(exception.NovaException):
+ code = 500
+
+ exc = FakeNovaException(code=404)
+ self.assertEqual(exc.kwargs['code'], 404)
+
+ def test_cleanse_dict(self):
+ kwargs = {'foo': 1, 'blah_pass': 2, 'zoo_password': 3, '_pass': 4}
+ self.assertEqual(exception._cleanse_dict(kwargs), {'foo': 1})
+
+ kwargs = {}
+ self.assertEqual(exception._cleanse_dict(kwargs), {})
+
+ def test_format_message_local(self):
+ class FakeNovaException(exception.NovaException):
+ msg_fmt = "some message"
+
+ exc = FakeNovaException()
+ self.assertEqual(six.text_type(exc), exc.format_message())
+
+ def test_format_message_remote(self):
+ class FakeNovaException_Remote(exception.NovaException):
+ msg_fmt = "some message"
+
+ def __unicode__(self):
+ return u"print the whole trace"
+
+ exc = FakeNovaException_Remote()
+ self.assertEqual(six.text_type(exc), u"print the whole trace")
+ self.assertEqual(exc.format_message(), "some message")
+
+ def test_format_message_remote_error(self):
+ class FakeNovaException_Remote(exception.NovaException):
+ msg_fmt = "some message %(somearg)s"
+
+ def __unicode__(self):
+ return u"print the whole trace"
+
+ self.flags(fatal_exception_format_errors=False)
+ exc = FakeNovaException_Remote(lame_arg='lame')
+ self.assertEqual(exc.format_message(), "some message %(somearg)s")
+
+
+class ExceptionTestCase(test.NoDBTestCase):
+ @staticmethod
+ def _raise_exc(exc):
+ raise exc()
+
+ def test_exceptions_raise(self):
+ # NOTE(dprince): disable format errors since we are not passing kwargs
+ self.flags(fatal_exception_format_errors=False)
+ for name in dir(exception):
+ exc = getattr(exception, name)
+ if isinstance(exc, type):
+ self.assertRaises(exc, self._raise_exc, exc)
+
+
+class ExceptionValidMessageTestCase(test.NoDBTestCase):
+
+ def test_messages(self):
+ failures = []
+
+ for name, obj in inspect.getmembers(exception):
+ if name in ['NovaException', 'InstanceFaultRollback']:
+ continue
+
+ if not inspect.isclass(obj):
+ continue
+
+ if not issubclass(obj, exception.NovaException):
+ continue
+
+ e = obj
+ if e.msg_fmt == "An unknown exception occurred.":
+ failures.append('%s needs a more specific msg_fmt' % name)
+
+ if failures:
+ self.fail('\n'.join(failures))
diff --git a/nova/tests/unit/test_flavors.py b/nova/tests/unit/test_flavors.py
new file mode 100644
index 0000000000..46fd81d6db
--- /dev/null
+++ b/nova/tests/unit/test_flavors.py
@@ -0,0 +1,593 @@
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for flavors code
+"""
+import time
+
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as sql_session
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova import test
+
+
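+# The flavors pre-seeded by the test database fixture, listed in the order the
+# default sorted-list query is expected to return them (see
+# test_get_all_flavors_sorted_list_sort below).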
+DEFAULT_FLAVORS = [
+ {'memory_mb': 512, 'root_gb': 1, 'deleted_at': None, 'name': 'm1.tiny',
+ 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0,
+ 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1',
+ 'vcpu_weight': None, 'id': 2},
+ {'memory_mb': 2048, 'root_gb': 20, 'deleted_at': None, 'name': 'm1.small',
+ 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0,
+ 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '2',
+ 'vcpu_weight': None, 'id': 5},
+ {'memory_mb': 4096, 'root_gb': 40, 'deleted_at': None, 'name': 'm1.medium',
+ 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 2, 'extra_specs': {}, 'swap': 0,
+ 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '3',
+ 'vcpu_weight': None, 'id': 1},
+ {'memory_mb': 8192, 'root_gb': 80, 'deleted_at': None, 'name': 'm1.large',
+ 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 4, 'extra_specs': {}, 'swap': 0,
+ 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '4',
+ 'vcpu_weight': None, 'id': 3},
+ {'memory_mb': 16384, 'root_gb': 160, 'deleted_at': None,
+ 'name': 'm1.xlarge', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0,
+ 'updated_at': None, 'disabled': False, 'vcpus': 8, 'extra_specs': {},
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '5',
+ 'vcpu_weight': None, 'id': 4}
+]
+
+
+class InstanceTypeTestCase(test.TestCase):
+ """Test cases for flavor code."""
+ def _generate_name(self):
+ """return a name not in the DB."""
+ nonexistent_flavor = str(int(time.time()))
+ all_flavors = flavors.get_all_flavors()
+        while nonexistent_flavor in all_flavors:
+            nonexistent_flavor += "z"
+        return nonexistent_flavor
+
+ def _generate_flavorid(self):
+ """return a flavorid not in the DB."""
+ nonexistent_flavor = 2700
+ flavor_ids = [value["id"] for key, value in
+ flavors.get_all_flavors().iteritems()]
+        while nonexistent_flavor in flavor_ids:
+            nonexistent_flavor += 1
+        return nonexistent_flavor
+
+ def _existing_flavor(self):
+ """return first flavor name."""
+ return flavors.get_all_flavors().keys()[0]
+
+ def test_add_instance_type_access(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ type_ref = flavors.create('some flavor', 256, 1, 120, 100,
+ flavorid=flavor_id)
+ access_ref = flavors.add_flavor_access(flavor_id,
+ project_id,
+ ctxt=ctxt)
+ self.assertEqual(access_ref["project_id"], project_id)
+ self.assertEqual(access_ref["instance_type_id"], type_ref["id"])
+
+ def test_add_flavor_access_already_exists(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
+ flavors.add_flavor_access(flavor_id, project_id, ctxt=ctxt)
+ self.assertRaises(exception.FlavorAccessExists,
+ flavors.add_flavor_access,
+ flavor_id, project_id, ctxt)
+
+ def test_add_flavor_access_invalid_flavor(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'no_such_flavor'
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.add_flavor_access,
+ flavor_id, project_id, ctxt)
+
+ def test_remove_flavor_access(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
+ flavors.add_flavor_access(flavor_id, project_id, ctxt)
+ flavors.remove_flavor_access(flavor_id, project_id, ctxt)
+
+ projects = flavors.get_flavor_access_by_flavor_id(flavor_id,
+ ctxt)
+ self.assertEqual([], projects)
+
+ def test_remove_flavor_access_does_not_exist(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
+ self.assertRaises(exception.FlavorAccessNotFound,
+ flavors.remove_flavor_access,
+ flavor_id, project_id, ctxt=ctxt)
+
+ def test_get_all_instance_types(self):
+ # Ensures that all flavors can be retrieved.
+ session = sql_session.get_session()
+ total_instance_types = session.query(models.InstanceTypes).count()
+ inst_types = flavors.get_all_flavors()
+ self.assertEqual(total_instance_types, len(inst_types))
+
+ def test_non_existent_inst_type_should_not_delete(self):
+        # Ensures destroying an unknown flavor raises FlavorNotFoundByName.
+ self.assertRaises(exception.FlavorNotFoundByName,
+ flavors.destroy,
+ 'unknown_flavor')
+
+ def test_will_not_destroy_with_no_name(self):
+        # Ensure destroy with no name raises FlavorNotFoundByName.
+ self.assertRaises(exception.FlavorNotFoundByName,
+ flavors.destroy, None)
+
+ def test_will_not_get_bad_default_instance_type(self):
+        # Ensures an error is raised for a bad default flavor.
+ self.flags(default_flavor='unknown_flavor')
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_default_flavor)
+
+ def test_will_get_flavor_by_id(self):
+ default_instance_type = flavors.get_default_flavor()
+ instance_type_id = default_instance_type['id']
+ fetched = flavors.get_flavor(instance_type_id)
+ self.assertEqual(default_instance_type, fetched)
+
+ def test_will_not_get_flavor_by_unknown_id(self):
+        # Ensure get by an unknown id raises FlavorNotFound.
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_flavor, 10000)
+
+ def test_will_not_get_flavor_with_bad_id(self):
+        # Ensure get by a malformed id raises FlavorNotFound.
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_flavor, 'asdf')
+
+ def test_flavor_get_by_None_name_returns_default(self):
+ # Ensure get by name returns default flavor with no name.
+ default = flavors.get_default_flavor()
+ actual = flavors.get_flavor_by_name(None)
+ self.assertEqual(default, actual)
+
+ def test_will_not_get_flavor_with_bad_name(self):
+        # Ensure get by a nonexistent name raises FlavorNotFound.
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_flavor_by_name, 10000)
+
+ def test_will_not_get_instance_by_unknown_flavor_id(self):
+ # Ensure get by flavor raises error with wrong flavorid.
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_flavor_by_flavor_id,
+ 'unknown_flavor')
+
+ def test_will_get_instance_by_flavor_id(self):
+ default_instance_type = flavors.get_default_flavor()
+ flavorid = default_instance_type['flavorid']
+ fetched = flavors.get_flavor_by_flavor_id(flavorid)
+ self.assertEqual(default_instance_type, fetched)
+
+ def test_can_read_deleted_types_using_flavor_id(self):
+ # Ensure deleted flavors can be read when querying flavor_id.
+ inst_type_name = "test"
+ inst_type_flavor_id = "test1"
+
+ inst_type = flavors.create(inst_type_name, 256, 1, 120, 100,
+ inst_type_flavor_id)
+ self.assertEqual(inst_type_name, inst_type["name"])
+
+ # NOTE(jk0): The deleted flavor will show up here because the context
+ # in get_flavor_by_flavor_id() is set to use read_deleted by
+ # default.
+ flavors.destroy(inst_type["name"])
+ deleted_inst_type = flavors.get_flavor_by_flavor_id(
+ inst_type_flavor_id)
+ self.assertEqual(inst_type_name, deleted_inst_type["name"])
+
+ def test_read_deleted_false_converting_flavorid(self):
+ """Ensure deleted flavors are not returned when not needed (for
+ example when creating a server and attempting to translate from
+ flavorid to instance_type_id.
+ """
+ flavors.create("instance_type1", 256, 1, 120, 100, "test1")
+ flavors.destroy("instance_type1")
+ flavors.create("instance_type1_redo", 256, 1, 120, 100, "test1")
+
+ instance_type = flavors.get_flavor_by_flavor_id(
+ "test1", read_deleted="no")
+ self.assertEqual("instance_type1_redo", instance_type["name"])
+
+ def test_get_all_flavors_sorted_list_sort(self):
+ # Test default sort
+ all_flavors = flavors.get_all_flavors_sorted_list()
+ self.assertEqual(DEFAULT_FLAVORS, all_flavors)
+
+ # Test sorted by name
+ all_flavors = flavors.get_all_flavors_sorted_list(sort_key='name')
+ expected = sorted(DEFAULT_FLAVORS, key=lambda item: item['name'])
+ self.assertEqual(expected, all_flavors)
+
+ def test_get_all_flavors_sorted_list_limit(self):
+ limited_flavors = flavors.get_all_flavors_sorted_list(limit=2)
+ self.assertEqual(2, len(limited_flavors))
+
+ def test_get_all_flavors_sorted_list_marker(self):
+ all_flavors = flavors.get_all_flavors_sorted_list()
+
+ # Set the 3rd result as the marker
+ marker_flavorid = all_flavors[2]['flavorid']
+ marked_flavors = flavors.get_all_flavors_sorted_list(
+ marker=marker_flavorid)
+ # We expect everything /after/ the 3rd result
+ expected_results = all_flavors[3:]
+ self.assertEqual(expected_results, marked_flavors)
+
+ def test_get_inactive_flavors(self):
+ flav1 = flavors.create('flavor1', 256, 1, 120)
+ flav2 = flavors.create('flavor2', 512, 4, 250)
+ flavors.destroy('flavor1')
+
+ returned_flavors_ids = flavors.get_all_flavors().keys()
+ self.assertNotIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+ returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
+ self.assertIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+ def test_get_inactive_flavors_with_same_name(self):
+ flav1 = flavors.create('flavor', 256, 1, 120)
+ flavors.destroy('flavor')
+ flav2 = flavors.create('flavor', 512, 4, 250)
+
+ returned_flavors_ids = flavors.get_all_flavors().keys()
+ self.assertNotIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+ returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
+ self.assertIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+ def test_get_inactive_flavors_with_same_flavorid(self):
+ flav1 = flavors.create('flavor', 256, 1, 120, 100, "flavid")
+ flavors.destroy('flavor')
+ flav2 = flavors.create('flavor', 512, 4, 250, 100, "flavid")
+
+ returned_flavors_ids = flavors.get_all_flavors().keys()
+ self.assertNotIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+ returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
+ self.assertIn(flav1['id'], returned_flavors_ids)
+ self.assertIn(flav2['id'], returned_flavors_ids)
+
+
+class InstanceTypeToolsTest(test.TestCase):
+ def _dict_to_metadata(self, data):
+ return [{'key': key, 'value': value} for key, value in data.items()]
+
+ def _test_extract_flavor(self, prefix):
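+        # save_flavor_info flattens the flavor into (optionally prefixed)
+        # system_metadata entries; extract_flavor should rebuild the same
+        # subset of flavor properties from them.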
+ instance_type = flavors.get_default_flavor()
+
+ metadata = {}
+ flavors.save_flavor_info(metadata, instance_type,
+ prefix)
+ instance = {'system_metadata': self._dict_to_metadata(metadata)}
+ _instance_type = flavors.extract_flavor(instance, prefix)
+
+ props = flavors.system_metadata_flavor_props.keys()
+ for key in instance_type.keys():
+ if key not in props:
+ del instance_type[key]
+
+ self.assertEqual(instance_type, _instance_type)
+
+ def test_extract_flavor(self):
+ self._test_extract_flavor('')
+
+ def test_extract_flavor_prefix(self):
+ self._test_extract_flavor('foo_')
+
+ def test_save_flavor_info(self):
+ instance_type = flavors.get_default_flavor()
+
+ example = {}
+ example_prefix = {}
+
+ for key in flavors.system_metadata_flavor_props.keys():
+ example['instance_type_%s' % key] = instance_type[key]
+ example_prefix['fooinstance_type_%s' % key] = instance_type[key]
+
+ metadata = {}
+ flavors.save_flavor_info(metadata, instance_type)
+ self.assertEqual(example, metadata)
+
+ metadata = {}
+ flavors.save_flavor_info(metadata, instance_type, 'foo')
+ self.assertEqual(example_prefix, metadata)
+
+ def test_delete_flavor_info(self):
+ instance_type = flavors.get_default_flavor()
+ metadata = {}
+ flavors.save_flavor_info(metadata, instance_type)
+ flavors.save_flavor_info(metadata, instance_type, '_')
+ flavors.delete_flavor_info(metadata, '', '_')
+ self.assertEqual(metadata, {})
+
+ def test_flavor_numa_extras_are_saved(self):
+ instance_type = flavors.get_default_flavor()
+ instance_type['extra_specs'] = {
+ 'hw:numa_mem.0': '123',
+ 'hw:numa_cpus.0': '456',
+ 'hw:numa_mem.1': '789',
+ 'hw:numa_cpus.1': 'ABC',
+ 'foo': 'bar',
+ }
+ sysmeta = flavors.save_flavor_info({}, instance_type)
+ _instance_type = flavors.extract_flavor({'system_metadata': sysmeta})
+ expected_extra_specs = {
+ 'hw:numa_mem.0': '123',
+ 'hw:numa_cpus.0': '456',
+ 'hw:numa_mem.1': '789',
+ 'hw:numa_cpus.1': 'ABC',
+ }
+ self.assertEqual(expected_extra_specs, _instance_type['extra_specs'])
+ flavors.delete_flavor_info(sysmeta, '')
+ self.assertEqual({}, sysmeta)
+
+
+class InstanceTypeFilteringTest(test.TestCase):
+ """Test cases for the filter option available for instance_type_get_all."""
+ def setUp(self):
+ super(InstanceTypeFilteringTest, self).setUp()
+ self.context = context.get_admin_context()
+
+ def assertFilterResults(self, filters, expected):
+ inst_types = db.flavor_get_all(
+ self.context, filters=filters)
+ inst_names = [i['name'] for i in inst_types]
+ self.assertEqual(inst_names, expected)
+
+ def test_no_filters(self):
+ filters = None
+ expected = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large',
+ 'm1.xlarge']
+ self.assertFilterResults(filters, expected)
+
+ def test_min_memory_mb_filter(self):
+ # Exclude tiny instance which is 512 MB.
+ filters = dict(min_memory_mb=513)
+ expected = ['m1.small', 'm1.medium', 'm1.large', 'm1.xlarge']
+ self.assertFilterResults(filters, expected)
+
+ def test_min_root_gb_filter(self):
+ # Exclude everything but large and xlarge which have >= 80 GB.
+ filters = dict(min_root_gb=80)
+ expected = ['m1.large', 'm1.xlarge']
+ self.assertFilterResults(filters, expected)
+
+ def test_min_memory_mb_AND_root_gb_filter(self):
+ # Exclude everything but large and xlarge which have >= 80 GB.
+ filters = dict(min_memory_mb=16384, min_root_gb=80)
+ expected = ['m1.xlarge']
+ self.assertFilterResults(filters, expected)
+
+
+class CreateInstanceTypeTest(test.TestCase):
+
+ def assertInvalidInput(self, *create_args, **create_kwargs):
+ self.assertRaises(exception.InvalidInput, flavors.create,
+ *create_args, **create_kwargs)
+
+ def test_create_with_valid_name(self):
+ # Names can contain alphanumeric and [_.- ]
+ flavors.create('azAZ09. -_', 64, 1, 120)
+ # And they are not limited to ascii characters
+ # E.g.: m1.huge in simplified Chinese
+ flavors.create(u'm1.\u5DE8\u5927', 6400, 100, 12000)
+
+ def test_name_with_special_characters(self):
+ # Names can contain alphanumeric and [_.- ]
+ flavors.create('_foo.bar-123', 64, 1, 120)
+
+        # Ensure flavor creation raises InvalidInput for invalid characters.
+ self.assertInvalidInput('foobar#', 64, 1, 120)
+
+ def test_non_ascii_name_with_special_characters(self):
+ self.assertInvalidInput(u'm1.\u5DE8\u5927 #', 64, 1, 120)
+
+ def test_name_length_checks(self):
+ MAX_LEN = 255
+
+ # Flavor name with 255 characters or less is valid.
+ flavors.create('a' * MAX_LEN, 64, 1, 120)
+
+ # Flavor name which is more than 255 characters will cause error.
+ self.assertInvalidInput('a' * (MAX_LEN + 1), 64, 1, 120)
+
+ # Flavor name which is empty should cause an error
+ self.assertInvalidInput('', 64, 1, 120)
+
+ def test_all_whitespace_flavor_names_rejected(self):
+ self.assertInvalidInput(' ', 64, 1, 120)
+
+ def test_flavorid_with_invalid_characters(self):
+ # Ensure Flavor ID can only contain [a-zA-Z0-9_.- ]
+ self.assertInvalidInput('a', 64, 1, 120, flavorid=u'\u2605')
+ self.assertInvalidInput('a', 64, 1, 120, flavorid='%%$%$@#$#@$@#$^%')
+
+ def test_flavorid_length_checks(self):
+ MAX_LEN = 255
+ # Flavor ID which is more than 255 characters will cause error.
+ self.assertInvalidInput('a', 64, 1, 120, flavorid='a' * (MAX_LEN + 1))
+
+ def test_memory_must_be_positive_db_integer(self):
+ self.assertInvalidInput('flavor1', 'foo', 1, 120)
+ self.assertInvalidInput('flavor1', -1, 1, 120)
+ self.assertInvalidInput('flavor1', 0, 1, 120)
+ self.assertInvalidInput('flavor1', db.MAX_INT + 1, 1, 120)
+ flavors.create('flavor1', 1, 1, 120)
+
+ def test_vcpus_must_be_positive_db_integer(self):
+        self.assertInvalidInput('flavor1', 64, 'foo', 120)
+ self.assertInvalidInput('flavor1', 64, -1, 120)
+ self.assertInvalidInput('flavor1', 64, 0, 120)
+ self.assertInvalidInput('flavor1', 64, db.MAX_INT + 1, 120)
+ flavors.create('flavor1', 64, 1, 120)
+
+ def test_root_gb_must_be_nonnegative_db_integer(self):
+ self.assertInvalidInput('flavor1', 64, 1, 'foo')
+ self.assertInvalidInput('flavor1', 64, 1, -1)
+ self.assertInvalidInput('flavor1', 64, 1, db.MAX_INT + 1)
+ flavors.create('flavor1', 64, 1, 0)
+ flavors.create('flavor2', 64, 1, 120)
+
+ def test_ephemeral_gb_must_be_nonnegative_db_integer(self):
+ self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo')
+ self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1)
+ self.assertInvalidInput('flavor1', 64, 1, 120,
+ ephemeral_gb=db.MAX_INT + 1)
+ flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0)
+ flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120)
+
+ def test_swap_must_be_nonnegative_db_integer(self):
+ self.assertInvalidInput('flavor1', 64, 1, 120, swap='foo')
+ self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1)
+ self.assertInvalidInput('flavor1', 64, 1, 120,
+ swap=db.MAX_INT + 1)
+ flavors.create('flavor1', 64, 1, 120, swap=0)
+ flavors.create('flavor2', 64, 1, 120, swap=1)
+
+ def test_rxtx_factor_must_be_positive_float(self):
+ self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo')
+ self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0)
+ self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0)
+
+ flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0)
+ self.assertEqual(1.0, flavor['rxtx_factor'])
+
+ flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1)
+ self.assertEqual(1.1, flavor['rxtx_factor'])
+
+ def test_rxtx_factor_must_be_within_sql_float_range(self):
+ _context = context.get_admin_context()
+ db.flavor_get_all(_context)
+ # We do * 10 since this is an approximation and we need to make sure
+        # the difference is noticeable.
+ over_rxtx_factor = flavors.SQL_SP_FLOAT_MAX * 10
+
+ self.assertInvalidInput('flavor1', 64, 1, 120,
+ rxtx_factor=over_rxtx_factor)
+
+ flavor = flavors.create('flavor2', 64, 1, 120,
+ rxtx_factor=flavors.SQL_SP_FLOAT_MAX)
+ self.assertEqual(flavors.SQL_SP_FLOAT_MAX, flavor['rxtx_factor'])
+
+ def test_is_public_must_be_valid_bool_string(self):
+ self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo')
+
+ flavors.create('flavor1', 64, 1, 120, is_public='TRUE')
+ flavors.create('flavor2', 64, 1, 120, is_public='False')
+ flavors.create('flavor3', 64, 1, 120, is_public='Yes')
+ flavors.create('flavor4', 64, 1, 120, is_public='No')
+ flavors.create('flavor5', 64, 1, 120, is_public='Y')
+ flavors.create('flavor6', 64, 1, 120, is_public='N')
+ flavors.create('flavor7', 64, 1, 120, is_public='1')
+ flavors.create('flavor8', 64, 1, 120, is_public='0')
+ flavors.create('flavor9', 64, 1, 120, is_public='true')
+
+ def test_flavorid_populated(self):
+ flavor1 = flavors.create('flavor1', 64, 1, 120)
+ self.assertIsNot(None, flavor1['flavorid'])
+
+ flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='')
+ self.assertIsNot(None, flavor2['flavorid'])
+
+ flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo')
+ self.assertEqual('foo', flavor3['flavorid'])
+
+ def test_default_values(self):
+ flavor1 = flavors.create('flavor1', 64, 1, 120)
+
+ self.assertIsNot(None, flavor1['flavorid'])
+ self.assertEqual(flavor1['ephemeral_gb'], 0)
+ self.assertEqual(flavor1['swap'], 0)
+ self.assertEqual(flavor1['rxtx_factor'], 1.0)
+
+ def test_basic_create(self):
+ # Ensure instance types can be created.
+ original_list = flavors.get_all_flavors()
+
+ # Create new type and make sure values stick
+ flavor = flavors.create('flavor', 64, 1, 120)
+ self.assertEqual(flavor['name'], 'flavor')
+ self.assertEqual(flavor['memory_mb'], 64)
+ self.assertEqual(flavor['vcpus'], 1)
+ self.assertEqual(flavor['root_gb'], 120)
+
+ # Ensure new type shows up in list
+ new_list = flavors.get_all_flavors()
+ self.assertNotEqual(len(original_list), len(new_list),
+ 'flavor was not created')
+
+ def test_create_then_delete(self):
+ original_list = flavors.get_all_flavors()
+
+ flavor = flavors.create('flavor', 64, 1, 120)
+
+ # Ensure new type shows up in list
+ new_list = flavors.get_all_flavors()
+ self.assertNotEqual(len(original_list), len(new_list),
+ 'instance type was not created')
+
+ flavors.destroy('flavor')
+ self.assertRaises(exception.FlavorNotFound,
+ flavors.get_flavor, flavor['id'])
+
+ # Deleted instance should not be in list anymore
+ new_list = flavors.get_all_flavors()
+ self.assertEqual(original_list, new_list)
+
+ def test_duplicate_names_fail(self):
+        # Ensures that duplicate names raise FlavorExists.
+ flavors.create('flavor', 256, 1, 120, 200, 'flavor1')
+ self.assertRaises(exception.FlavorExists,
+ flavors.create,
+ 'flavor', 64, 1, 120)
+
+ def test_duplicate_flavorids_fail(self):
+        # Ensures that duplicate flavorids raise FlavorIdExists.
+ flavors.create('flavor1', 64, 1, 120, flavorid='flavorid')
+ self.assertRaises(exception.FlavorIdExists,
+ flavors.create,
+ 'flavor2', 64, 1, 120, flavorid='flavorid')
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
new file mode 100644
index 0000000000..69089c0cd3
--- /dev/null
+++ b/nova/tests/unit/test_hacking.py
@@ -0,0 +1,403 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import textwrap
+
+import mock
+import pep8
+
+from nova.hacking import checks
+from nova import test
+
+
+class HackingTestCase(test.NoDBTestCase):
+ """This class tests the hacking checks in nova.hacking.checks by passing
+ strings to the check methods like the pep8/flake8 parser would. The parser
+ loops over each line in the file and then passes the parameters to the
+ check method. The parameter names in the check method dictate what type of
+ object is passed to the check method. The parameter types are::
+
+ logical_line: A processed line with the following modifications:
+ - Multi-line statements converted to a single line.
+ - Stripped left and right.
+ - Contents of strings replaced with "xxx" of same length.
+ - Comments removed.
+ physical_line: Raw line of text from the input file.
+ lines: a list of the raw lines from the input file
+ tokens: the tokens that contribute to this logical line
+ line_number: line number in the input file
+ total_lines: number of lines in the input file
+ blank_lines: blank lines before this one
+ indent_char: indentation character in this file (" " or "\t")
+ indent_level: indentation (with tabs expanded to multiples of 8)
+ previous_indent_level: indentation on previous line
+ previous_logical: previous logical line
+ filename: Path of the file being run through pep8
+
+    When running a test on a check method the return will be False/None if
+    there is no violation in the sample input. If there is a violation, a
+    tuple is returned with the position in the line and a message. So to
+    check the result, use assertTrue if the check is expected to flag the
+    input and assertFalse if it should pass.
+ """
+ def test_virt_driver_imports(self):
+
+ expect = (0, "N311: importing code from other virt drivers forbidden")
+
+ self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
+ "from nova.virt.libvirt import utils as libvirt_utils",
+ "./nova/virt/xenapi/driver.py"))
+
+ self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
+ "import nova.virt.libvirt.utils as libvirt_utils",
+ "./nova/virt/xenapi/driver.py"))
+
+ self.assertIsNone(checks.import_no_virt_driver_import_deps(
+ "from nova.virt.libvirt import utils as libvirt_utils",
+ "./nova/virt/libvirt/driver.py"))
+
+ self.assertIsNone(checks.import_no_virt_driver_import_deps(
+ "import nova.virt.firewall",
+ "./nova/virt/libvirt/firewall.py"))
+
+ def test_virt_driver_config_vars(self):
+ self.assertIsInstance(checks.import_no_virt_driver_config_deps(
+ "CONF.import_opt('volume_drivers', "
+ "'nova.virt.libvirt.driver', group='libvirt')",
+ "./nova/virt/xenapi/driver.py"), tuple)
+
+ self.assertIsNone(checks.import_no_virt_driver_config_deps(
+ "CONF.import_opt('volume_drivers', "
+ "'nova.virt.libvirt.driver', group='libvirt')",
+ "./nova/virt/libvirt/volume.py"))
+
+ def test_no_vi_headers(self):
+
+ lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
+ 'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
+ 'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
+
+ self.assertIsNone(checks.no_vi_headers(
+ "Test string foo", 1, lines))
+ self.assertEqual(len(list(checks.no_vi_headers(
+ "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
+ 2, lines))), 2)
+ self.assertIsNone(checks.no_vi_headers(
+ "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
+ 6, lines))
+ self.assertIsNone(checks.no_vi_headers(
+ "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
+ 9, lines))
+ self.assertEqual(len(list(checks.no_vi_headers(
+ "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
+ 14, lines))), 2)
+ self.assertIsNone(checks.no_vi_headers(
+ "Test end string for vi",
+ 15, lines))
+
+ def test_no_author_tags(self):
+ self.assertIsInstance(checks.no_author_tags("# author: jogo"), tuple)
+ self.assertIsInstance(checks.no_author_tags("# @author: jogo"), tuple)
+ self.assertIsInstance(checks.no_author_tags("# @Author: jogo"), tuple)
+ self.assertIsInstance(checks.no_author_tags("# Author: jogo"), tuple)
+ self.assertIsInstance(checks.no_author_tags(".. moduleauthor:: jogo"),
+ tuple)
+ self.assertIsNone(checks.no_author_tags("# authorization of this"))
+ self.assertEqual(2, checks.no_author_tags("# author: jogo")[0])
+ self.assertEqual(2, checks.no_author_tags("# Author: jogo")[0])
+ self.assertEqual(3, checks.no_author_tags(".. moduleauthor:: jogo")[0])
+
+ def test_assert_true_instance(self):
+ self.assertEqual(len(list(checks.assert_true_instance(
+ "self.assertTrue(isinstance(e, "
+ "exception.BuildAbortException))"))), 1)
+
+ self.assertEqual(
+ len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
+
+ def test_assert_equal_type(self):
+ self.assertEqual(len(list(checks.assert_equal_type(
+ "self.assertEqual(type(als['QuicAssist']), list)"))), 1)
+
+ self.assertEqual(
+ len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
+
+ def test_assert_equal_none(self):
+ self.assertEqual(len(list(checks.assert_equal_none(
+ "self.assertEqual(A, None)"))), 1)
+
+ self.assertEqual(len(list(checks.assert_equal_none(
+ "self.assertEqual(None, A)"))), 1)
+
+ self.assertEqual(
+ len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
+
+ def test_no_translate_debug_logs(self):
+ self.assertEqual(len(list(checks.no_translate_debug_logs(
+ "LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
+
+ self.assertEqual(len(list(checks.no_translate_debug_logs(
+ "LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
+
+ self.assertEqual(len(list(checks.no_translate_debug_logs(
+ "LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)
+
+ def test_no_setting_conf_directly_in_tests(self):
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.option = 1", "nova/tests/test_foo.py"))), 1)
+
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
+
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
+
+ # Shouldn't fail with comparisons
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
+
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.option != 1", "nova/tests/test_foo.py"))), 0)
+
+ # Shouldn't fail since not in nova/tests/
+ self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
+ "CONF.option = 1", "nova/compute/foo.py"))), 0)
+
+ def test_log_translations(self):
+ logs = ['audit', 'error', 'info', 'warn', 'warning', 'critical',
+ 'exception']
+ levels = ['_LI', '_LW', '_LE', '_LC']
+ debug = "LOG.debug('OK')"
+ self.assertEqual(0,
+ len(list(
+ checks.validate_log_translations(debug, debug, 'f'))))
+ for log in logs:
+ bad = 'LOG.%s("Bad")' % log
+ self.assertEqual(1,
+ len(list(
+ checks.validate_log_translations(bad, bad, 'f'))))
+ ok = "LOG.%s(_('OK'))" % log
+ self.assertEqual(0,
+ len(list(
+ checks.validate_log_translations(ok, ok, 'f'))))
+ ok = "LOG.%s('OK') # noqa" % log
+ self.assertEqual(0,
+ len(list(
+ checks.validate_log_translations(ok, ok, 'f'))))
+ ok = "LOG.%s(variable)" % log
+ self.assertEqual(0,
+ len(list(
+ checks.validate_log_translations(ok, ok, 'f'))))
+ for level in levels:
+ ok = "LOG.%s(%s('OK'))" % (log, level)
+ self.assertEqual(0,
+ len(list(
+ checks.validate_log_translations(ok, ok, 'f'))))
+
+ def test_no_mutable_default_args(self):
+ self.assertEqual(1, len(list(checks.no_mutable_default_args(
+ " def fake_suds_context(calls={}):"))))
+
+ self.assertEqual(1, len(list(checks.no_mutable_default_args(
+ "def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
+
+ self.assertEqual(0, len(list(checks.no_mutable_default_args(
+ "defined = []"))))
+
+ self.assertEqual(0, len(list(checks.no_mutable_default_args(
+ "defined, undefined = [], {}"))))
+
+ def test_check_explicit_underscore_import(self):
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "LOG.info(_('My info message'))",
+ "cinder/tests/other_files.py"))), 1)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "msg = _('My message')",
+ "cinder/tests/other_files.py"))), 1)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "from cinder.i18n import _",
+ "cinder/tests/other_files.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "LOG.info(_('My info message'))",
+ "cinder/tests/other_files.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "msg = _('My message')",
+ "cinder/tests/other_files.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "from cinder.i18n import _, _LW",
+ "cinder/tests/other_files2.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "msg = _('My message')",
+ "cinder/tests/other_files2.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "_ = translations.ugettext",
+ "cinder/tests/other_files3.py"))), 0)
+ self.assertEqual(len(list(checks.check_explicit_underscore_import(
+ "msg = _('My message')",
+ "cinder/tests/other_files3.py"))), 0)
+
+ def test_use_jsonutils(self):
+ def __get_msg(fun):
+ msg = ("N324: jsonutils.%(fun)s must be used instead of "
+ "json.%(fun)s" % {'fun': fun})
+ return [(0, msg)]
+
+ for method in ('dump', 'dumps', 'load', 'loads'):
+ self.assertEqual(
+ __get_msg(method),
+ list(checks.use_jsonutils("json.%s(" % method,
+ "./nova/virt/xenapi/driver.py")))
+ self.assertEqual(0,
+ len(list(checks.use_jsonutils("json.%s(" % method,
+ "./plugins/xenserver/script.py"))))
+ self.assertEqual(0,
+ len(list(checks.use_jsonutils("jsonx.%s(" % method,
+ "./nova/virt/xenapi/driver.py"))))
+ self.assertEqual(0,
+ len(list(checks.use_jsonutils("json.dumb",
+ "./nova/virt/xenapi/driver.py"))))
+
+ # We are patching pep8 so that only the check under test is actually
+ # installed.
+ @mock.patch('pep8._checks',
+ {'physical_line': {}, 'logical_line': {}, 'tree': {}})
+ def _run_check(self, code, checker, filename=None):
+ pep8.register_check(checker)
+
+ lines = textwrap.dedent(code).strip().splitlines(True)
+
+ checker = pep8.Checker(filename=filename, lines=lines)
+ checker.check_all()
+ checker.report._deferred_print.sort()
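+        # Each _deferred_print entry begins with (line, offset, check_code);
+        # _assert_has_errors compares just that prefix.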
+ return checker.report._deferred_print
+
+ def _assert_has_errors(self, code, checker, expected_errors=None,
+ filename=None):
+ actual_errors = [e[:3] for e in
+ self._run_check(code, checker, filename)]
+ self.assertEqual(expected_errors or [], actual_errors)
+
+ def test_assert_called_once(self):
+
+ checker = checks.check_assert_called_once
+ code = """
+ mock = Mock()
+ mock.method(1, 2, 3, test='wow')
+ mock.method.assert_called_once()
+ """
+ errors = [(3, 11, 'N327')]
+ self._assert_has_errors(code, checker, expected_errors=errors,
+ filename='nova/tests/test_assert.py')
+
+ def test_str_unicode_exception(self):
+
+ checker = checks.CheckForStrUnicodeExc
+ code = """
+ def f(a, b):
+ try:
+ p = str(a) + str(b)
+ except ValueError as e:
+ p = str(e)
+ return p
+ """
+ errors = [(5, 16, 'N325')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ try:
+ p = unicode(a) + str(b)
+ except ValueError as e:
+ p = e
+ return p
+ """
+ errors = []
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ try:
+ p = str(a) + str(b)
+ except ValueError as e:
+ p = unicode(e)
+ return p
+ """
+ errors = [(5, 20, 'N325')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ try:
+ p = str(a) + str(b)
+ except ValueError as e:
+ try:
+ p = unicode(a) + unicode(b)
+ except ValueError as ve:
+ p = str(e) + str(ve)
+ p = e
+ return p
+ """
+ errors = [(8, 20, 'N325'), (8, 29, 'N325')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ try:
+ p = str(a) + str(b)
+ except ValueError as e:
+ try:
+ p = unicode(a) + unicode(b)
+ except ValueError as ve:
+ p = str(e) + unicode(ve)
+ p = str(e)
+ return p
+ """
+ errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ def test_trans_add(self):
+
+ checker = checks.CheckForTransAdd
+ code = """
+ def fake_tran(msg):
+ return msg
+
+
+ _ = fake_tran
+ _LI = _
+ _LW = _
+ _LE = _
+ _LC = _
+
+
+ def f(a, b):
+ msg = _('test') + 'add me'
+ msg = _LI('test') + 'add me'
+ msg = _LW('test') + 'add me'
+ msg = _LE('test') + 'add me'
+ msg = _LC('test') + 'add me'
+ msg = 'add to me' + _('test')
+ return msg
+ """
+ errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
+ (16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
+ self._assert_has_errors(code, checker, expected_errors=errors)
+
+ code = """
+ def f(a, b):
+ msg = 'test' + 'add me'
+ return msg
+ """
+ errors = []
+ self._assert_has_errors(code, checker, expected_errors=errors)
diff --git a/nova/tests/unit/test_hooks.py b/nova/tests/unit/test_hooks.py
new file mode 100644
index 0000000000..9017787151
--- /dev/null
+++ b/nova/tests/unit/test_hooks.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for hook customization."""
+
+import stevedore
+
+from nova import hooks
+from nova import test
+
+
+class SampleHookA(object):
+ name = "a"
+
+ def _add_called(self, op, kwargs):
+ called = kwargs.get('called', None)
+ if called is not None:
+ called.append(op + self.name)
+
+ def pre(self, *args, **kwargs):
+ self._add_called("pre", kwargs)
+
+
+class SampleHookB(SampleHookA):
+ name = "b"
+
+ def post(self, rv, *args, **kwargs):
+ self._add_called("post", kwargs)
+
+
+class SampleHookC(SampleHookA):
+ name = "c"
+
+ def pre(self, f, *args, **kwargs):
+ self._add_called("pre" + f.__name__, kwargs)
+
+ def post(self, f, rv, *args, **kwargs):
+ self._add_called("post" + f.__name__, kwargs)
+
+
+class SampleHookExceptionPre(SampleHookA):
+ name = "epre"
+ exception = Exception()
+
+ def pre(self, f, *args, **kwargs):
+ raise self.exception
+
+
+class SampleHookExceptionPost(SampleHookA):
+ name = "epost"
+ exception = Exception()
+
+ def post(self, f, rv, *args, **kwargs):
+ raise self.exception
+
+
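+# Minimal stand-in for a setuptools entry point: stevedore only needs an
+# object with a load() method here, so no real plugin machinery is involved.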
+class MockEntryPoint(object):
+
+ def __init__(self, cls):
+ self.cls = cls
+
+ def load(self):
+ return self.cls
+
+
+class MockedHookTestCase(test.BaseHookTestCase):
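+    # Replaces stevedore's plugin discovery so each test case can inject its
+    # own sample hooks through _mock_load_plugins.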
+ def _mock_load_plugins(self, iload, *iargs, **ikwargs):
+ return []
+
+ def setUp(self):
+ super(MockedHookTestCase, self).setUp()
+
+ hooks.reset()
+
+ self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
+ self._mock_load_plugins)
+
+
+class HookTestCase(MockedHookTestCase):
+ def _mock_load_plugins(self, iload, *iargs, **ikwargs):
+ return [
+ stevedore.extension.Extension('test_hook',
+ MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
+ stevedore.extension.Extension('test_hook',
+ MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
+ ]
+
+ def setUp(self):
+ super(HookTestCase, self).setUp()
+
+ hooks.reset()
+
+ self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
+ self._mock_load_plugins)
+
+ @hooks.add_hook('test_hook')
+ def _hooked(self, a, b=1, c=2, called=None):
+ return 42
+
+ def test_basic(self):
+ self.assertEqual(42, self._hooked(1))
+
+ mgr = hooks._HOOKS['test_hook']
+ self.assert_has_hook('test_hook', self._hooked)
+ self.assertEqual(2, len(mgr.extensions))
+ self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
+ self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
+
+ def test_order_of_execution(self):
+ called_order = []
+ self._hooked(42, called=called_order)
+ self.assertEqual(['prea', 'preb', 'postb'], called_order)
+
+
+class HookTestCaseWithFunction(MockedHookTestCase):
+ def _mock_load_plugins(self, iload, *iargs, **ikwargs):
+ return [
+ stevedore.extension.Extension('function_hook',
+ MockEntryPoint(SampleHookC), SampleHookC, SampleHookC()),
+ ]
+
+ @hooks.add_hook('function_hook', pass_function=True)
+ def _hooked(self, a, b=1, c=2, called=None):
+ return 42
+
+ def test_basic(self):
+ self.assertEqual(42, self._hooked(1))
+ mgr = hooks._HOOKS['function_hook']
+
+ self.assert_has_hook('function_hook', self._hooked)
+ self.assertEqual(1, len(mgr.extensions))
+ self.assertEqual(SampleHookC, mgr.extensions[0].plugin)
+
+ def test_order_of_execution(self):
+ called_order = []
+ self._hooked(42, called=called_order)
+ self.assertEqual(['pre_hookedc', 'post_hookedc'], called_order)
+
+
+class HookFailPreTestCase(MockedHookTestCase):
+ def _mock_load_plugins(self, iload, *iargs, **ikwargs):
+ return [
+ stevedore.extension.Extension('fail_pre',
+ MockEntryPoint(SampleHookExceptionPre),
+ SampleHookExceptionPre, SampleHookExceptionPre()),
+ ]
+
+ @hooks.add_hook('fail_pre', pass_function=True)
+ def _hooked(self, a, b=1, c=2, called=None):
+ return 42
+
+ def test_hook_fail_should_still_return(self):
+ self.assertEqual(42, self._hooked(1))
+
+ mgr = hooks._HOOKS['fail_pre']
+ self.assert_has_hook('fail_pre', self._hooked)
+ self.assertEqual(1, len(mgr.extensions))
+ self.assertEqual(SampleHookExceptionPre, mgr.extensions[0].plugin)
+
+ def test_hook_fail_should_raise_fatal(self):
+ self.stubs.Set(SampleHookExceptionPre, 'exception',
+ hooks.FatalHookException())
+
+ self.assertRaises(hooks.FatalHookException,
+ self._hooked, 1)
+
+
+class HookFailPostTestCase(MockedHookTestCase):
+ def _mock_load_plugins(self, iload, *iargs, **ikwargs):
+ return [
+ stevedore.extension.Extension('fail_post',
+ MockEntryPoint(SampleHookExceptionPost),
+ SampleHookExceptionPost, SampleHookExceptionPost()),
+ ]
+
+ @hooks.add_hook('fail_post', pass_function=True)
+ def _hooked(self, a, b=1, c=2, called=None):
+ return 42
+
+ def test_hook_fail_should_still_return(self):
+ self.assertEqual(42, self._hooked(1))
+
+ mgr = hooks._HOOKS['fail_post']
+ self.assert_has_hook('fail_post', self._hooked)
+ self.assertEqual(1, len(mgr.extensions))
+ self.assertEqual(SampleHookExceptionPost, mgr.extensions[0].plugin)
+
+ def test_hook_fail_should_raise_fatal(self):
+ self.stubs.Set(SampleHookExceptionPost, 'exception',
+ hooks.FatalHookException())
+
+ self.assertRaises(hooks.FatalHookException,
+ self._hooked, 1)
diff --git a/nova/tests/unit/test_instance_types_extra_specs.py b/nova/tests/unit/test_instance_types_extra_specs.py
new file mode 100644
index 0000000000..8031376045
--- /dev/null
+++ b/nova/tests/unit/test_instance_types_extra_specs.py
@@ -0,0 +1,142 @@
+# Copyright 2011 University of Southern California
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types extra specs code
+"""
+
+from nova.compute import arch
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+
+
+class InstanceTypeExtraSpecsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(InstanceTypeExtraSpecsTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ values = dict(name="cg1.4xlarge",
+ memory_mb=22000,
+ vcpus=8,
+ root_gb=1690,
+ ephemeral_gb=2000,
+ flavorid=105)
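+        # self.specs doubles as the expected extra_specs state; the tests
+        # below mutate it in step with the corresponding DB calls.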
+ self.specs = dict(cpu_arch=arch.X86_64,
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050")
+ values['extra_specs'] = self.specs
+ ref = db.flavor_create(self.context,
+ values)
+ self.instance_type_id = ref["id"]
+ self.flavorid = ref["flavorid"]
+
+ def tearDown(self):
+ # Remove the instance type from the database
+ db.flavor_destroy(self.context, "cg1.4xlarge")
+ super(InstanceTypeExtraSpecsTestCase, self).tearDown()
+
+ def test_instance_type_specs_get(self):
+ actual_specs = db.flavor_extra_specs_get(
+ self.context,
+ self.flavorid)
+ self.assertEqual(self.specs, actual_specs)
+
+ def test_flavor_extra_specs_delete(self):
+ del self.specs["xpu_model"]
+ db.flavor_extra_specs_delete(self.context,
+ self.flavorid,
+ "xpu_model")
+ actual_specs = db.flavor_extra_specs_get(
+ self.context,
+ self.flavorid)
+ self.assertEqual(self.specs, actual_specs)
+
+ def test_instance_type_extra_specs_update(self):
+ self.specs["cpu_model"] = "Sandy Bridge"
+ db.flavor_extra_specs_update_or_create(
+ self.context,
+ self.flavorid,
+ dict(cpu_model="Sandy Bridge"))
+ actual_specs = db.flavor_extra_specs_get(
+ self.context,
+ self.flavorid)
+ self.assertEqual(self.specs, actual_specs)
+
+ def test_instance_type_extra_specs_update_with_nonexisting_flavor(self):
+ extra_specs = dict(cpu_arch=arch.X86_64)
+ nonexisting_flavorid = "some_flavor_that_does_not_exist"
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_extra_specs_update_or_create,
+ self.context, nonexisting_flavorid, extra_specs)
+
+ def test_instance_type_extra_specs_create(self):
+ net_attrs = {
+ "net_arch": "ethernet",
+ "net_mbps": "10000"
+ }
+ self.specs.update(net_attrs)
+ db.flavor_extra_specs_update_or_create(
+ self.context,
+ self.flavorid,
+ net_attrs)
+ actual_specs = db.flavor_extra_specs_get(
+ self.context,
+ self.flavorid)
+ self.assertEqual(self.specs, actual_specs)
+
+ def test_instance_type_get_with_extra_specs(self):
+ instance_type = db.flavor_get(
+ self.context,
+ self.instance_type_id)
+ self.assertEqual(instance_type['extra_specs'],
+ self.specs)
+ instance_type = db.flavor_get(
+ self.context,
+ 5)
+ self.assertEqual(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_by_name_with_extra_specs(self):
+ instance_type = db.flavor_get_by_name(
+ self.context,
+ "cg1.4xlarge")
+ self.assertEqual(instance_type['extra_specs'],
+ self.specs)
+ instance_type = db.flavor_get_by_name(
+ self.context,
+ "m1.small")
+ self.assertEqual(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_by_flavor_id_with_extra_specs(self):
+ instance_type = db.flavor_get_by_flavor_id(
+ self.context,
+ 105)
+ self.assertEqual(instance_type['extra_specs'],
+ self.specs)
+ instance_type = db.flavor_get_by_flavor_id(
+ self.context,
+ 2)
+ self.assertEqual(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_all(self):
+ types = db.flavor_get_all(self.context)
+
+ name2specs = {}
+ for instance_type in types:
+ name = instance_type['name']
+ name2specs[name] = instance_type['extra_specs']
+
+ self.assertEqual(name2specs['cg1.4xlarge'], self.specs)
+ self.assertEqual(name2specs['m1.small'], {})
diff --git a/nova/tests/unit/test_iptables_network.py b/nova/tests/unit/test_iptables_network.py
new file mode 100644
index 0000000000..bd20b101bb
--- /dev/null
+++ b/nova/tests/unit/test_iptables_network.py
@@ -0,0 +1,277 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit Tests for network code."""
+
+from nova.network import linux_net
+from nova import test
+
+
+class IptablesManagerTestCase(test.NoDBTestCase):
+
+ binary_name = linux_net.get_binary_name()
+
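+    # IptablesManager wraps the built-in chains in chains named after the
+    # running binary, so the sample dumps below interpolate that name.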
+ sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*filter',
+ ':INPUT ACCEPT [2223527:305688874]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [2172501:140856656]',
+ ':iptables-top-rule - [0:0]',
+ ':iptables-bottom-rule - [0:0]',
+ ':%s-FORWARD - [0:0]' % (binary_name),
+ ':%s-INPUT - [0:0]' % (binary_name),
+ ':%s-local - [0:0]' % (binary_name),
+ ':%s-OUTPUT - [0:0]' % (binary_name),
+ ':nova-filter-top - [0:0]',
+ '[0:0] -A FORWARD -j nova-filter-top',
+ '[0:0] -A OUTPUT -j nova-filter-top',
+ '[0:0] -A nova-filter-top -j %s-local' % (binary_name),
+ '[0:0] -A INPUT -j %s-INPUT' % (binary_name),
+ '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
+ '[0:0] -A FORWARD -j %s-FORWARD' % (binary_name),
+ '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 53 '
+ '-j ACCEPT',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 53 '
+ '-j ACCEPT',
+ '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 67 '
+ '-j ACCEPT',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 '
+ '-j ACCEPT',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 '
+ '-j ACCEPT',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [3936:762355]',
+ ':INPUT ACCEPT [2447:225266]',
+ ':OUTPUT ACCEPT [63491:4191863]',
+ ':POSTROUTING ACCEPT [63112:4108641]',
+ ':%s-OUTPUT - [0:0]' % (binary_name),
+ ':%s-snat - [0:0]' % (binary_name),
+ ':%s-PREROUTING - [0:0]' % (binary_name),
+ ':%s-float-snat - [0:0]' % (binary_name),
+ ':%s-POSTROUTING - [0:0]' % (binary_name),
+ ':nova-postrouting-bottom - [0:0]',
+ '[0:0] -A PREROUTING -j %s-PREROUTING' % (binary_name),
+ '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
+ '[0:0] -A POSTROUTING -j %s-POSTROUTING' % (binary_name),
+ '[0:0] -A nova-postrouting-bottom '
+ '-j %s-snat' % (binary_name),
+ '[0:0] -A %s-snat '
+ '-j %s-float-snat' % (binary_name, binary_name),
+ '[0:0] -A POSTROUTING -j nova-postrouting-bottom',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ def setUp(self):
+ super(IptablesManagerTestCase, self).setUp()
+ self.manager = linux_net.IptablesManager()
+
+ def test_duplicate_rules_no_dirty(self):
+ table = self.manager.ipv4['filter']
+ table.dirty = False
+ num_rules = len(table.rules)
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ self.assertEqual(len(table.rules), num_rules + 1)
+ self.assertTrue(table.dirty)
+ table.dirty = False
+ num_rules = len(table.rules)
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ self.assertEqual(len(table.rules), num_rules)
+ self.assertFalse(table.dirty)
+
+ def test_clean_tables_no_apply(self):
+ for table in self.manager.ipv4.itervalues():
+ table.dirty = False
+ for table in self.manager.ipv6.itervalues():
+ table.dirty = False
+
+ def error_apply():
+ raise test.TestingException()
+
+ self.stubs.Set(self.manager, '_apply', error_apply)
+ self.manager.apply()
+
+ def test_filter_rules_are_wrapped(self):
+ current_lines = self.sample_filter
+
+ table = self.manager.ipv4['filter']
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table, 'filter')
+ self.assertIn('[0:0] -A %s-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines)
+
+ table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table, 'filter')
+ self.assertNotIn('[0:0] -A %s-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' % self.binary_name, new_lines)
+
+ def test_remove_rules_regex(self):
+ current_lines = self.sample_nat
+ table = self.manager.ipv4['nat']
+ table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
+ ' -d 10.0.0.1')
+ table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
+ ' -o eth0')
+ table.add_rule('PREROUTING', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
+ table.add_rule('OUTPUT', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
+ table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
+ ' -d 10.0.0.10')
+ table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
+ ' -o eth0')
+ table.add_rule('PREROUTING', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
+ table.add_rule('OUTPUT', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
+ new_lines = self.manager._modify_rules(current_lines, table, 'nat')
+ self.assertEqual(len(new_lines) - len(current_lines), 8)
+        regex = r'.*\s+%s(/32|\s+|$)'
+ num_removed = table.remove_rules_regex(regex % '10.10.10.10')
+ self.assertEqual(num_removed, 4)
+ new_lines = self.manager._modify_rules(current_lines, table, 'nat')
+ self.assertEqual(len(new_lines) - len(current_lines), 4)
+ num_removed = table.remove_rules_regex(regex % '10.10.10.11')
+ self.assertEqual(num_removed, 4)
+ new_lines = self.manager._modify_rules(current_lines, table, 'nat')
+ self.assertEqual(new_lines, current_lines)
+
+ def test_nat_rules(self):
+ current_lines = self.sample_nat
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['nat'],
+ 'nat')
+
+ for line in [':%s-OUTPUT - [0:0]' % (self.binary_name),
+ ':%s-float-snat - [0:0]' % (self.binary_name),
+ ':%s-snat - [0:0]' % (self.binary_name),
+ ':%s-PREROUTING - [0:0]' % (self.binary_name),
+ ':%s-POSTROUTING - [0:0]' % (self.binary_name)]:
+ self.assertTrue(line in new_lines, "One of our chains went"
+ " missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ last_postrouting_line = ''
+
+ for line in new_lines:
+ if line.startswith('[0:0] -A POSTROUTING'):
+ last_postrouting_line = line
+
+ self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+ "Last POSTROUTING rule does not jump to "
+ "nova-postouting-bottom: %s" % last_postrouting_line)
+
+ for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+ self.assertTrue('[0:0] -A %s -j %s-%s' %
+ (chain, self.binary_name, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_filter_rules(self):
+ current_lines = self.sample_filter
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ 'nat')
+
+ for line in [':%s-FORWARD - [0:0]' % (self.binary_name),
+ ':%s-INPUT - [0:0]' % (self.binary_name),
+ ':%s-local - [0:0]' % (self.binary_name),
+ ':%s-OUTPUT - [0:0]' % (self.binary_name)]:
+ self.assertTrue(line in new_lines, "One of our chains went"
+ " missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ for chain in ['FORWARD', 'OUTPUT']:
+ for line in new_lines:
+ if line.startswith('[0:0] -A %s' % chain):
+ self.assertTrue('-j nova-filter-top' in line,
+ "First %s rule does not "
+ "jump to nova-filter-top" % chain)
+ break
+
+ self.assertTrue('[0:0] -A nova-filter-top '
+ '-j %s-local' % self.binary_name in new_lines,
+ "nova-filter-top does not jump to wrapped local chain")
+
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ self.assertTrue('[0:0] -A %s -j %s-%s' %
+ (chain, self.binary_name, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_missing_table(self):
+ current_lines = []
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ 'filter')
+
+ for line in ['*filter',
+ 'COMMIT']:
+ self.assertTrue(line in new_lines, "One of iptables key lines"
+ "went missing.")
+
+ self.assertTrue(len(new_lines) > 4, "No iptables rules added")
+
+ self.assertTrue("#Generated by nova" == new_lines[0] and
+ "*filter" == new_lines[1] and
+ "COMMIT" == new_lines[-2] and
+ "#Completed by nova" == new_lines[-1],
+ "iptables rules not generated in the correct order")
+
+ def test_iptables_top_order(self):
+ # Test iptables_top_regex
+ current_lines = list(self.sample_filter)
+ current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
+ self.flags(iptables_top_regex='-j iptables-top-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ 'filter')
+ self.assertEqual(current_lines, new_lines)
+
+ def test_iptables_bottom_order(self):
+ # Test iptables_bottom_regex
+ current_lines = list(self.sample_filter)
+ current_lines[26:26] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
+ self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ 'filter')
+ self.assertEqual(current_lines, new_lines)
+
+ def test_iptables_preserve_order(self):
+ # Test both iptables_top_regex and iptables_bottom_regex
+ current_lines = list(self.sample_filter)
+ current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
+ current_lines[27:27] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
+ self.flags(iptables_top_regex='-j iptables-top-rule')
+ self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ 'filter')
+ self.assertEqual(current_lines, new_lines)
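
The remove_rules_regex test above leans on removing tracked rules by pattern; as a rough, standalone illustration (an assumption for clarity, not nova's IptablesTable implementation, whose rule objects and chain wrapping differ), the behaviour the counts assert can be sketched as:

import re

def remove_rules_regex(rules, pattern):
    # Hypothetical sketch: drop every stored rule string that matches the
    # pattern and report how many were removed, mirroring the counts the
    # test checks (four rules per floating IP in the example above).
    regex = re.compile(pattern)
    kept = [rule for rule in rules if not regex.match(rule)]
    removed = len(rules) - len(kept)
    rules[:] = kept  # mutate in place, like a table's internal rule list
    return removed
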
diff --git a/nova/tests/unit/test_ipv6.py b/nova/tests/unit/test_ipv6.py
new file mode 100644
index 0000000000..4aa6c2a803
--- /dev/null
+++ b/nova/tests/unit/test_ipv6.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for IPv6."""
+
+from nova import ipv6
+from nova import test
+
+
+class IPv6RFC2462TestCase(test.NoDBTestCase):
+ """Unit tests for IPv6 rfc2462 backend operations."""
+ def setUp(self):
+ super(IPv6RFC2462TestCase, self).setUp()
+ self.flags(ipv6_backend='rfc2462')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEqual(addr, '2001:db8::16:3eff:fe33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455')
+ self.assertEqual(mac, '00:16:3e:33:44:55')
+
+ def test_to_global_with_bad_mac(self):
+ bad_mac = '02:16:3e:33:44:5Z'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::', bad_mac, 'test')
+
+ def test_to_global_with_bad_prefix(self):
+ bad_prefix = '82'
+ self.assertRaises(TypeError, ipv6.to_global,
+ bad_prefix,
+ '2001:db8::216:3eff:fe33:4455',
+ 'test')
+
+ def test_to_global_with_bad_project(self):
+ bad_project = 'non-existent-project-name'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::',
+ '2001:db8::a94a:8fe5:ff33:4455',
+ bad_project)
+
+
+class IPv6AccountIdentiferTestCase(test.NoDBTestCase):
+ """Unit tests for IPv6 account_identifier backend operations."""
+ def setUp(self):
+ super(IPv6AccountIdentiferTestCase, self).setUp()
+ self.flags(ipv6_backend='account_identifier')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEqual(addr, '2001:db8::a94a:8fe5:ff33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455')
+ self.assertEqual(mac, '02:16:3e:33:44:55')
+
+ def test_to_global_with_bad_mac(self):
+ bad_mac = '02:16:3e:33:44:5X'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::', bad_mac, 'test')
+
+ def test_to_global_with_bad_prefix(self):
+ bad_prefix = '78'
+ self.assertRaises(TypeError, ipv6.to_global,
+ bad_prefix,
+ '2001:db8::a94a:8fe5:ff33:4455',
+ 'test')
+
+ def test_to_global_with_bad_project(self):
+ bad_project = 'non-existent-project-name'
+ self.assertRaises(TypeError, ipv6.to_global,
+ '2001:db8::',
+ '2001:db8::a94a:8fe5:ff33:4455',
+ bad_project)
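
For orientation, the rfc2462 backend cases above follow the EUI-64 construction from RFC 2462/4291. A minimal standalone sketch that reproduces the expected address (an illustration only; nova.ipv6 itself goes through the netaddr library, and the account_identifier backend hashes the project name instead of using the MAC):

def mac_to_rfc2462_global(prefix, mac):
    # Hypothetical sketch: flip the universal/local bit of the first MAC
    # octet, splice ff:fe into the middle, and append the result to the
    # network prefix.
    octets = [int(part, 16) for part in mac.split(':')]
    octets[0] ^= 0x02  # universal/local bit
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    groups = ['%02x%02x' % (eui64[i], eui64[i + 1]) for i in (0, 2, 4, 6)]
    return prefix + ':'.join(group.lstrip('0') or '0' for group in groups)

# mac_to_rfc2462_global('2001:db8::', '02:16:3e:33:44:55')
# -> '2001:db8::16:3eff:fe33:4455', matching test_to_global above.
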
diff --git a/nova/tests/unit/test_linuxscsi.py b/nova/tests/unit/test_linuxscsi.py
new file mode 100644
index 0000000000..8b1a26a546
--- /dev/null
+++ b/nova/tests/unit/test_linuxscsi.py
@@ -0,0 +1,134 @@
+# Copyright 2010 OpenStack Foundation
+# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.openstack.common import log as logging
+from nova.storage import linuxscsi
+from nova import test
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+class StorageLinuxSCSITestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(StorageLinuxSCSITestCase, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_find_multipath_device_3par(self):
+ def fake_execute(*cmd, **kwargs):
+ out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n"
+ "size=2.0G features='0' hwhandler='0' wp=rw\n"
+ "`-+- policy='round-robin 0' prio=-1 status=active\n"
+ " |- 0:0:0:1 sde 8:64 active undef running\n"
+ " `- 2:0:0:1 sdf 8:80 active undef running\n"
+ )
+ return out, None
+
+ def fake_execute2(*cmd, **kwargs):
+ out = ("350002ac20398383d dm-3 3PARdata,VV\n"
+ "size=2.0G features='0' hwhandler='0' wp=rw\n"
+ "`-+- policy='round-robin 0' prio=-1 status=active\n"
+ " |- 0:0:0:1 sde 8:64 active undef running\n"
+ " `- 2:0:0:1 sdf 8:80 active undef running\n"
+ )
+ return out, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ info = linuxscsi.find_multipath_device('/dev/sde')
+ LOG.error("info = %s" % info)
+ self.assertEqual("/dev/mapper/350002ac20398383d", info["device"])
+ self.assertEqual("/dev/sde", info['devices'][0]['device'])
+ self.assertEqual("0", info['devices'][0]['host'])
+ self.assertEqual("0", info['devices'][0]['id'])
+ self.assertEqual("0", info['devices'][0]['channel'])
+ self.assertEqual("1", info['devices'][0]['lun'])
+
+ self.assertEqual("/dev/sdf", info['devices'][1]['device'])
+ self.assertEqual("2", info['devices'][1]['host'])
+ self.assertEqual("0", info['devices'][1]['id'])
+ self.assertEqual("0", info['devices'][1]['channel'])
+ self.assertEqual("1", info['devices'][1]['lun'])
+
+ def test_find_multipath_device_svc(self):
+ def fake_execute(*cmd, **kwargs):
+ out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n"
+ "size=954M features='1 queue_if_no_path' hwhandler='0'"
+ " wp=rw\n"
+ "|-+- policy='round-robin 0' prio=-1 status=active\n"
+ "| |- 6:0:2:0 sde 8:64 active undef running\n"
+ "| `- 6:0:4:0 sdg 8:96 active undef running\n"
+ "`-+- policy='round-robin 0' prio=-1 status=enabled\n"
+ " |- 6:0:3:0 sdf 8:80 active undef running\n"
+ " `- 6:0:5:0 sdh 8:112 active undef running\n"
+ )
+ return out, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ info = linuxscsi.find_multipath_device('/dev/sde')
+ LOG.error("info = %s" % info)
+ self.assertEqual("/dev/mapper/36005076da00638089c000000000004d5",
+ info["device"])
+ self.assertEqual("/dev/sde", info['devices'][0]['device'])
+ self.assertEqual("6", info['devices'][0]['host'])
+ self.assertEqual("0", info['devices'][0]['channel'])
+ self.assertEqual("2", info['devices'][0]['id'])
+ self.assertEqual("0", info['devices'][0]['lun'])
+
+ self.assertEqual("/dev/sdf", info['devices'][2]['device'])
+ self.assertEqual("6", info['devices'][2]['host'])
+ self.assertEqual("0", info['devices'][2]['channel'])
+ self.assertEqual("3", info['devices'][2]['id'])
+ self.assertEqual("0", info['devices'][2]['lun'])
+
+ def test_find_multipath_device_ds8000(self):
+ def fake_execute(*cmd, **kwargs):
+ out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
+ "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
+ " wp=rw\n"
+ "`-+- policy='round-robin 0' prio=-1 status=active\n"
+ " |- 6:0:2:0 sdd 8:64 active undef running\n"
+ " `- 6:1:0:3 sdc 8:32 active undef running\n"
+ )
+ return out, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ info = linuxscsi.find_multipath_device('/dev/sdd')
+ LOG.error("info = %s" % info)
+ self.assertEqual("/dev/mapper/36005076303ffc48e0000000000000101",
+ info["device"])
+ self.assertEqual("/dev/sdd", info['devices'][0]['device'])
+ self.assertEqual("6", info['devices'][0]['host'])
+ self.assertEqual("0", info['devices'][0]['channel'])
+ self.assertEqual("2", info['devices'][0]['id'])
+ self.assertEqual("0", info['devices'][0]['lun'])
+
+ self.assertEqual("/dev/sdc", info['devices'][1]['device'])
+ self.assertEqual("6", info['devices'][1]['host'])
+ self.assertEqual("1", info['devices'][1]['channel'])
+ self.assertEqual("0", info['devices'][1]['id'])
+ self.assertEqual("3", info['devices'][1]['lun'])
diff --git a/nova/tests/unit/test_loadables.py b/nova/tests/unit/test_loadables.py
new file mode 100644
index 0000000000..9f29d850e9
--- /dev/null
+++ b/nova/tests/unit/test_loadables.py
@@ -0,0 +1,113 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Loadable class handling.
+"""
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_loadables
+
+
+class LoadablesTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LoadablesTestCase, self).setUp()
+ self.fake_loader = fake_loadables.FakeLoader()
+ # The name that we imported above for testing
+ self.test_package = 'nova.tests.unit.fake_loadables'
+
+ def test_loader_init(self):
+ self.assertEqual(self.fake_loader.package, self.test_package)
+ # Test the path of the module
+ ending_path = '/' + self.test_package.replace('.', '/')
+ self.assertTrue(self.fake_loader.path.endswith(ending_path))
+ self.assertEqual(self.fake_loader.loadable_cls_type,
+ fake_loadables.FakeLoadable)
+
+ def _compare_classes(self, classes, expected):
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(set(class_names), set(expected))
+
+ def test_get_all_classes(self):
+ classes = self.fake_loader.get_all_classes()
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_underscore(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2._FakeLoadableSubClass7']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type1(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass4',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type2(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass8']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_one_function(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_two_functions(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_function_including_invalids(self):
+ # When a function is used, its returned classes are not validity-checked.
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_invalid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ '_FakeLoadableSubClass3',
+ 'FakeLoadableSubClass4',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
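
The loader behaviour these tests pin down — a dotted name may point at a loadable class, which is type-checked, or at a function, whose return value is taken as-is — can be sketched roughly as follows. This is an illustrative approximation, not nova.loadables (which raises exception.ClassNotFound and also rejects underscore-prefixed names):

import importlib

def resolve_loadables(dotted_name, expected_base):
    # Hypothetical sketch of resolving one dotted name from the lists above.
    module_name, _, attr = dotted_name.rpartition('.')
    obj = getattr(importlib.import_module(module_name), attr)
    if isinstance(obj, type):
        if not issubclass(obj, expected_base):
            raise TypeError('%s is not a %s' % (dotted_name, expected_base))
        return [obj]
    # Functions are trusted: their result is used unchecked, which is why
    # return_invalid_classes slips non-loadables through in the test above.
    return list(obj())
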
diff --git a/nova/tests/unit/test_matchers.py b/nova/tests/unit/test_matchers.py
new file mode 100644
index 0000000000..77fefafca8
--- /dev/null
+++ b/nova/tests/unit/test_matchers.py
@@ -0,0 +1,349 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+from testtools.tests.matchers import helpers
+
+from nova.tests.unit import matchers
+
+
+class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictMatches(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ str_examples = [
+ ("DictMatches({'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Keys in d1 and not d2: set(['foo', 'baz', 'cat'])."
+ " Keys in d2 and not d1: set([])", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
+
+
+class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictListMatches(
+ [{'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'},
+ ])
+
+ matches_matches = [
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ matches_mismatches = [
+ [],
+ {},
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'cat': 'yorkie'}],
+ [{'foo': 'bop', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ str_examples = [
+ ("DictListMatches([{'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
+ " {'dog': 'yorkie'}])",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: True d2: False",
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ matches_matcher),
+ ]
+
+
+class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.IsSubDictOf(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux'}
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
+ ]
+
+ str_examples = [
+ ("IsSubDictOf({'foo': 'bar', 'baz': 'DONTCARE',"
+ " 'cat': {'fluffy': False, 'tabby': True}})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
+
+
+class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="DONTCARE"/>
+ <children>
+ <!--This is a comment-->
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>DONTCARE</child3>
+ <?spam processing instruction?>
+ </children>
+</root>""")
+
+ matches_matches = ["""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key2="spam" key1="spam"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children><child1>child 1</child1>
+<child2>child 2</child2>
+<child3>blah</child3>
+ </children>
+</root>""",
+ ]
+
+ matches_mismatches = ["""<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>mismatch text</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key3="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="quux" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child4>child 4</child4>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ </children>
+</root>""",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ <child4>child 4</child4>
+ </children>
+</root>""",
+ ]
+
+ str_examples = [
+ ("XMLMatches('<?xml version=\"1.0\"?>\\n"
+ "<root>\\n"
+ " <text>some text here</text>\\n"
+ " <text>some other text here</text>\\n"
+ " <attrs key1=\"spam\" key2=\"DONTCARE\"/>\\n"
+ " <children>\\n"
+ " <!--This is a comment-->\\n"
+ " <child1>child 1</child1>\\n"
+ " <child2>child 2</child2>\\n"
+ " <child3>DONTCARE</child3>\\n"
+ " <?spam processing instruction?>\\n"
+ " </children>\\n"
+ "</root>')", matches_matcher),
+ ]
+
+ describe_examples = [
+ ("/root/text[1]: XML text value mismatch: expected text value: "
+ "'some other text here'; actual value: 'mismatch text'",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>mismatch text</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/attrs[2]: XML attributes mismatch: keys only in expected: "
+ "key2; keys only in actual: key3",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key3="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/attrs[2]: XML attribute value mismatch: expected value of "
+ "attribute key1: 'spam'; actual value: 'quux'",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="quux" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML tag mismatch at index 1: expected tag "
+ "<child2>; actual tag <child4>",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child4>child 4</child4>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML expected child element <child3> not "
+ "present at index 2",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ </children>
+</root>""", matches_matcher),
+ ("/root/children[3]: XML unexpected child element <child4> "
+ "present at index 3",
+ """<?xml version="1.0"?>
+<root>
+ <text>some text here</text>
+ <text>some other text here</text>
+ <attrs key1="spam" key2="quux"/>
+ <children>
+ <child1>child 1</child1>
+ <child2>child 2</child2>
+ <child3>child 3</child3>
+ <child4>child 4</child4>
+ </children>
+</root>""", matches_matcher),
+ ]
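
At its core, the DictMatches behaviour exercised above compares key sets and values recursively, treating the literal string 'DONTCARE' as a wildcard (IsSubDictOf relaxes the key-set requirement). A condensed sketch of that rule, as an assumption for illustration rather than the matchers module itself:

def dicts_match(expected, actual):
    # Hypothetical sketch of the DONTCARE-aware comparison the tests rely on.
    if set(expected) != set(actual):
        return False  # mismatched key sets
    for key, exp in expected.items():
        act = actual[key]
        if exp == 'DONTCARE':
            continue  # wildcard value
        if isinstance(exp, dict) and isinstance(act, dict):
            if not dicts_match(exp, act):
                return False
        elif exp != act:
            return False
    return True
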
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
new file mode 100644
index 0000000000..90f57f1af2
--- /dev/null
+++ b/nova/tests/unit/test_metadata.py
@@ -0,0 +1,865 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for metadata service."""
+
+import base64
+import hashlib
+import hmac
+import re
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import base
+from nova.api.metadata import handler
+from nova.api.metadata import password
+from nova import block_device
+from nova.compute import flavors
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_security_group
+from nova.virt import netutils
+
+CONF = cfg.CONF
+
+USER_DATA_STRING = ("This is an encoded string")
+ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
+
+INSTANCE = fake_instance.fake_db_instance(**
+ {'id': 1,
+ 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
+ 'name': 'fake',
+ 'project_id': 'test',
+ 'key_name': "mykey",
+ 'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
+ 'host': 'test',
+ 'launch_index': 1,
+ 'instance_type': {'name': 'm1.tiny'},
+ 'reservation_id': 'r-xxxxxxxx',
+ 'user_data': ENCODE_USER_DATA_STRING,
+ 'image_ref': 7,
+ 'vcpus': 1,
+ 'fixed_ips': [],
+ 'root_device_name': '/dev/sda1',
+ 'info_cache': test_instance_info_cache.fake_info_cache,
+ 'hostname': 'test.novadomain',
+ 'display_name': 'my_displayname',
+ 'metadata': {},
+ 'system_metadata': {},
+ })
+
+
+def fake_inst_obj(context):
+ return objects.Instance._from_db_object(
+ context, objects.Instance(), INSTANCE,
+ expected_attrs=['metadata', 'system_metadata',
+ 'info_cache'])
+
+
+def get_default_sys_meta():
+ return flavors.save_flavor_info(
+ {}, flavors.get_default_flavor())
+
+
+def return_non_existing_address(*args, **kwarg):
+ raise exception.NotFound()
+
+
+def fake_InstanceMetadata(stubs, inst_data, address=None,
+ sgroups=None, content=None, extra_md=None,
+ vd_driver=None, network_info=None):
+ content = content or []
+ extra_md = extra_md or {}
+ if sgroups is None:
+ sgroups = [dict(test_security_group.fake_secgroup,
+ name='default')]
+
+ def sg_get(*args, **kwargs):
+ return sgroups
+
+ stubs.Set(api, 'security_group_get_by_instance', sg_get)
+ return base.InstanceMetadata(inst_data, address=address,
+ content=content, extra_md=extra_md,
+ vd_driver=vd_driver, network_info=network_info)
+
+
+def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
+ fake_get_metadata=None, headers=None,
+ fake_get_metadata_by_instance_id=None):
+
+ def get_metadata_by_remote_address(address):
+ return mdinst
+
+ app = handler.MetadataRequestHandler()
+
+ if fake_get_metadata is None:
+ fake_get_metadata = get_metadata_by_remote_address
+
+ if stubs:
+ stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
+
+ if fake_get_metadata_by_instance_id:
+ stubs.Set(app, 'get_metadata_by_instance_id',
+ fake_get_metadata_by_instance_id)
+
+ request = webob.Request.blank(relpath)
+ request.remote_addr = address
+
+ if headers is not None:
+ request.headers.update(headers)
+
+ response = request.get_response(app)
+ return response
+
+
+class MetadataTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def test_can_pickle_metadata(self):
+ # Make sure that InstanceMetadata can be pickled. This is
+ # required for the memcache backend to work correctly.
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ pickle.dumps(md, protocol=0)
+
+ def test_user_data(self):
+ inst = self.instance.obj_clone()
+ inst['user_data'] = base64.b64encode("happy")
+ md = fake_InstanceMetadata(self.stubs, inst)
+ self.assertEqual(
+ md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
+
+ def test_no_user_data(self):
+ inst = self.instance.obj_clone()
+ inst.user_data = None
+ md = fake_InstanceMetadata(self.stubs, inst)
+ obj = object()
+ self.assertEqual(
+ md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
+ obj)
+
+ def test_security_groups(self):
+ inst = self.instance.obj_clone()
+ sgroups = [dict(test_security_group.fake_secgroup, name='default'),
+ dict(test_security_group.fake_secgroup, name='other')]
+ expected = ['default', 'other']
+
+ md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['security-groups'], expected)
+
+ def test_local_hostname_fqdn(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-hostname'],
+ "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
+
+ def test_format_instance_mapping(self):
+ # Make sure that _format_instance_mappings works.
+ ctxt = None
+ instance_ref0 = objects.Instance(**{'id': 0,
+ 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
+ 'root_device_name': None,
+ 'default_ephemeral_device': None,
+ 'default_swap_device': None})
+ instance_ref1 = objects.Instance(**{'id': 0,
+ 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
+ 'root_device_name': '/dev/sda1',
+ 'default_ephemeral_device': None,
+ 'default_swap_device': None})
+
+ def fake_bdm_get(ctxt, uuid, use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 87654321,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdm_get)
+
+ expected = {'ami': 'sda1',
+ 'root': '/dev/sda1',
+ 'ephemeral0': '/dev/sdb',
+ 'swap': '/dev/sdc',
+ 'ebs0': '/dev/sdh'}
+
+ conductor_api.LocalAPI()
+
+ self.assertEqual(base._format_instance_mapping(ctxt,
+ instance_ref0), block_device._DEFAULT_MAPPINGS)
+ self.assertEqual(base._format_instance_mapping(ctxt,
+ instance_ref1), expected)
+
+ def test_pubkey(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
+
+ self.assertEqual(base.ec2_md_print(pubkey_ent),
+ "0=%s" % self.instance['key_name'])
+ self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
+ self.instance['key_data'])
+
+ def test_image_type_ramdisk(self):
+ inst = self.instance.obj_clone()
+ inst['ramdisk_id'] = 'ari-853667c0'
+ md = fake_InstanceMetadata(self.stubs, inst)
+ data = md.lookup("/latest/meta-data/ramdisk-id")
+
+ self.assertIsNotNone(data)
+ self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
+
+ def test_image_type_kernel(self):
+ inst = self.instance.obj_clone()
+ inst['kernel_id'] = 'aki-c2e26ff2'
+ md = fake_InstanceMetadata(self.stubs, inst)
+ data = md.lookup("/2009-04-04/meta-data/kernel-id")
+
+ self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
+
+ self.assertEqual(
+ md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
+
+ inst.kernel_id = None
+ md = fake_InstanceMetadata(self.stubs, inst)
+ self.assertRaises(base.InvalidMetadataPath,
+ md.lookup, "/2009-04-04/meta-data/kernel-id")
+
+ def test_check_version(self):
+ inst = self.instance.obj_clone()
+ md = fake_InstanceMetadata(self.stubs, inst)
+
+ self.assertTrue(md._check_version('1.0', '2009-04-04'))
+ self.assertFalse(md._check_version('2009-04-04', '1.0'))
+
+ self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
+ self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
+
+ self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
+
+ def test_InstanceMetadata_uses_passed_network_info(self):
+ network_info = []
+
+ self.mox.StubOutWithMock(netutils, "get_injected_network_template")
+ netutils.get_injected_network_template(network_info).AndReturn(False)
+ self.mox.ReplayAll()
+
+ base.InstanceMetadata(fake_inst_obj(self.context),
+ network_info=network_info)
+
+ def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
+ inst = self.instance.obj_clone()
+ inst_md = base.InstanceMetadata(inst)
+ for (path, value) in inst_md.metadata_for_config_drive():
+ self.assertIsNotNone(path)
+
+ def test_InstanceMetadata_queries_network_API_when_needed(self):
+ network_info_from_api = []
+
+ self.mox.StubOutWithMock(netutils, "get_injected_network_template")
+
+ netutils.get_injected_network_template(
+ network_info_from_api).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ base.InstanceMetadata(fake_inst_obj(self.context))
+
+ def test_local_ipv4_from_nw_info(self):
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ num_networks=2)
+ expected_local = "192.168.1.100"
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=nw_info)
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
+
+ def test_local_ipv4_from_address(self):
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ num_networks=2)
+ expected_local = "fake"
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=nw_info, address="fake")
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
+
+ def test_local_ipv4_from_nw_none(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=[])
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], '')
+
+
+class OpenStackMetadataTestCase(test.TestCase):
+ def setUp(self):
+ super(OpenStackMetadataTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance['system_metadata'] = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def test_with_primitive_instance(self):
+ mdinst = fake_InstanceMetadata(self.stubs, INSTANCE)
+ result = mdinst.lookup('/openstack')
+ self.assertIn('latest', result)
+
+ def test_top_level_listing(self):
+ # request for /openstack/<version>/ should show metadata.json
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack")
+
+ # trailing / should not affect anything
+ self.assertEqual(result, mdinst.lookup("/openstack/"))
+
+ # the 'content' should not show up in directory listing
+ self.assertNotIn(base.CONTENT_DIR, result)
+ self.assertIn('2012-08-10', result)
+ self.assertIn('latest', result)
+
+ def test_version_content_listing(self):
+ # request for /openstack/<version>/ should show metadata.json
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ listing = mdinst.lookup("/openstack/2012-08-10")
+ self.assertIn("meta_data.json", listing)
+
+ def test_returns_apis_supported_in_havana_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
+ base.VD_JSON_NAME], havana_supported_apis)
+
+ def test_returns_apis_supported_in_folsom_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
+ folsom_supported_apis)
+
+ def test_returns_apis_supported_in_grizzly_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
+ grizzly_supported_apis)
+
+ def test_metadata_json(self):
+ inst = self.instance.obj_clone()
+ content = [
+ ('/etc/my.conf', "content of my.conf"),
+ ('/root/hello', "content of /root/hello"),
+ ]
+
+ mdinst = fake_InstanceMetadata(self.stubs, inst,
+ content=content)
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
+
+ mddict = jsonutils.loads(mdjson)
+
+ self.assertEqual(mddict['uuid'], self.instance['uuid'])
+ self.assertIn('files', mddict)
+
+ self.assertIn('public_keys', mddict)
+ self.assertEqual(mddict['public_keys'][self.instance['key_name']],
+ self.instance['key_data'])
+
+ self.assertIn('launch_index', mddict)
+ self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
+
+ # verify that each of the things we put in content
+ # resulted in an entry in 'files', that their content
+ # there is as expected, and that /content lists them.
+ for (path, content) in content:
+ fent = [f for f in mddict['files'] if f['path'] == path]
+ self.assertEqual(1, len(fent))
+ fent = fent[0]
+ found = mdinst.lookup("/openstack%s" % fent['content_path'])
+ self.assertEqual(found, content)
+
+ def test_extra_md(self):
+ # make sure extra_md makes it through to metadata
+ inst = self.instance.obj_clone()
+ extra = {'foo': 'bar', 'mylist': [1, 2, 3],
+ 'mydict': {"one": 1, "two": 2}}
+ mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
+
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ mddict = jsonutils.loads(mdjson)
+
+ for key, val in extra.iteritems():
+ self.assertEqual(mddict[key], val)
+
+ def test_password(self):
+ # make sure extra_md makes it through to metadata
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack/latest/password")
+ self.assertEqual(result, password.handle_password)
+
+ def test_userdata(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
+ self.assertEqual(USER_DATA_STRING, userdata_found)
+
+ # since we had user-data in this instance, it should be in listing
+ self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
+
+ inst.user_data = None
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # since this instance had no user-data it should not be there.
+ self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
+
+ self.assertRaises(base.InvalidMetadataPath,
+ mdinst.lookup, "/openstack/2012-08-10/user_data")
+
+ def test_random_seed(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # verify that 2013-04-04 has the 'random' field
+ mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
+ mddict = jsonutils.loads(mdjson)
+
+ self.assertIn("random_seed", mddict)
+ self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
+
+ # verify that older versions do not have it
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ self.assertNotIn("random_seed", jsonutils.loads(mdjson))
+
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = jsonutils.loads(
+ mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
+ def test_vendor_data_presence(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # verify that 2013-10-17 has the vendor_data.json file
+ result = mdinst.lookup("/openstack/2013-10-17")
+ self.assertIn('vendor_data.json', result)
+
+ # verify that older versions do not have it
+ result = mdinst.lookup("/openstack/2013-04-04")
+ self.assertNotIn('vendor_data.json', result)
+
+ def test_vendor_data_response(self):
+ inst = self.instance.obj_clone()
+
+ mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
+
+ class myVdriver(base.VendorDataDriver):
+ def __init__(self, *args, **kwargs):
+ super(myVdriver, self).__init__(*args, **kwargs)
+ data = mydata.copy()
+ uuid = kwargs['instance']['uuid']
+ data.update({'inst_uuid': uuid})
+ self.data = data
+
+ def get(self):
+ return self.data
+
+ mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
+
+ # verify that 2013-10-17 has the vendor_data.json file
+ vdpath = "/openstack/2013-10-17/vendor_data.json"
+ vd = jsonutils.loads(mdinst.lookup(vdpath))
+
+ # the instance should be passed through, and our class copies the
+ # uuid through to 'inst_uuid'.
+ self.assertEqual(vd['inst_uuid'], inst['uuid'])
+
+ # check the other expected values
+ for k, v in mydata.items():
+ self.assertEqual(vd[k], v)
+
+
+class MetadataHandlerTestCase(test.TestCase):
+ """Test that metadata is returning proper values."""
+
+ def setUp(self):
+ super(MetadataHandlerTestCase, self).setUp()
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+
+ def test_callable(self):
+
+ def verify(req, meta_data):
+ self.assertIsInstance(meta_data, CallableMD)
+ return "foo"
+
+ class CallableMD(object):
+ def lookup(self, path_info):
+ return verify
+
+ response = fake_request(self.stubs, CallableMD(), "/bar")
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, "foo")
+
+ def test_root(self):
+ expected = "\n".join(base.VERSIONS) + "\nlatest"
+ response = fake_request(self.stubs, self.mdinst, "/")
+ self.assertEqual(response.body, expected)
+
+ response = fake_request(self.stubs, self.mdinst, "/foo/../")
+ self.assertEqual(response.body, expected)
+
+ def test_root_metadata_proxy_enabled(self):
+ self.flags(service_metadata_proxy=True,
+ group='neutron')
+
+ expected = "\n".join(base.VERSIONS) + "\nlatest"
+ response = fake_request(self.stubs, self.mdinst, "/")
+ self.assertEqual(response.body, expected)
+
+ response = fake_request(self.stubs, self.mdinst, "/foo/../")
+ self.assertEqual(response.body, expected)
+
+ def test_version_root(self):
+ response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body, 'meta-data/\nuser-data')
+
+ response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
+ self.assertEqual(response.status_int, 404)
+
+ def test_json_data(self):
+ response = fake_request(self.stubs, self.mdinst,
+ "/openstack/latest/meta_data.json")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("application/json"))
+
+ response = fake_request(self.stubs, self.mdinst,
+ "/openstack/latest/vendor_data.json")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("application/json"))
+
+ def test_user_data_non_existing_fixed_address(self):
+ self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
+ return_non_existing_address)
+ response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
+ "127.1.1.1")
+ self.assertEqual(response.status_int, 404)
+
+ def test_fixed_address_none(self):
+ response = fake_request(None, self.mdinst,
+ relpath="/2009-04-04/user-data", address=None)
+ self.assertEqual(response.status_int, 500)
+
+ def test_invalid_path_is_404(self):
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data-invalid")
+ self.assertEqual(response.status_int, 404)
+
+ def test_user_data_with_use_forwarded_header(self):
+ expected_addr = "192.192.192.2"
+
+ def fake_get_metadata(address):
+ if address == expected_addr:
+ return self.mdinst
+ else:
+ raise Exception("Expected addr of %s, got %s" %
+ (expected_addr, address))
+
+ self.flags(use_forwarded_for=True)
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="168.168.168.1",
+ fake_get_metadata=fake_get_metadata,
+ headers={'X-Forwarded-For': expected_addr})
+
+ self.assertEqual(response.status_int, 200)
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body,
+ base64.b64decode(self.instance['user_data']))
+
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="168.168.168.1",
+ fake_get_metadata=fake_get_metadata,
+ headers=None)
+ self.assertEqual(response.status_int, 500)
+
+ @mock.patch('nova.utils.constant_time_compare')
+ def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
+ mock_compare.side_effect = test.TestingException
+
+ req = webob.Request.blank('/')
+ hnd = handler.MetadataRequestHandler()
+
+ req.headers['X-Instance-ID'] = 'fake-inst'
+ req.headers['X-Tenant-ID'] = 'fake-proj'
+
+ self.assertRaises(test.TestingException,
+ hnd._handle_instance_id_request, req)
+
+ self.assertEqual(1, mock_compare.call_count)
+
+ def test_user_data_with_neutron_instance_id(self):
+ expected_instance_id = 'a-b-c-d'
+
+ def fake_get_metadata(instance_id, remote_address):
+ if remote_address is None:
+ raise Exception('Expected X-Forwarded-For header')
+ elif instance_id == expected_instance_id:
+ return self.mdinst
+ else:
+ # raise the exception to aid with 500 response code test
+ raise Exception("Expected instance_id of %s, got %s" %
+ (expected_instance_id, instance_id))
+
+ signed = hmac.new(
+ CONF.neutron.metadata_proxy_shared_secret,
+ expected_instance_id,
+ hashlib.sha256).hexdigest()
+
+ # try a request with service disabled
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+ self.assertEqual(response.status_int, 200)
+
+ # now enable the service
+ self.flags(service_metadata_proxy=True,
+ group='neutron')
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 200)
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body,
+ base64.b64decode(self.instance['user_data']))
+
+ # mismatched signature
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': ''})
+
+ self.assertEqual(response.status_int, 403)
+
+ # missing X-Tenant-ID from request
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 400)
+
+ # mismatched X-Tenant-ID
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'FAKE',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 404)
+
+ # without X-Forwarded-For
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 500)
+
+ # unexpected Instance-ID
+ signed = hmac.new(
+ CONF.neutron.metadata_proxy_shared_secret,
+ 'z-z-z-z',
+ hashlib.sha256).hexdigest()
+
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'z-z-z-z',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+ self.assertEqual(response.status_int, 500)
+
+ def test_get_metadata(self):
+ def _test_metadata_path(relpath):
+ # recursively confirm a http 200 from all meta-data elements
+ # available at relpath.
+ response = fake_request(self.stubs, self.mdinst,
+ relpath=relpath)
+ for item in response.body.split('\n'):
+ if 'public-keys' in relpath:
+ # meta-data/public-keys/0=keyname refers to
+ # meta-data/public-keys/0
+ item = item.split('=')[0]
+ if item.endswith('/'):
+ path = relpath + '/' + item
+ _test_metadata_path(path)
+ continue
+
+ path = relpath + '/' + item
+ response = fake_request(self.stubs, self.mdinst, relpath=path)
+ self.assertEqual(response.status_int, 200, message=path)
+
+ _test_metadata_path('/2009-04-04/meta-data')
+
+
+class MetadataPasswordTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataPasswordTestCase, self).setUp()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+ self.flags(use_local=True, group='conductor')
+
+ def test_get_password(self):
+ request = webob.Request.blank('')
+ self.mdinst.password = 'foo'
+ result = password.handle_password(request, self.mdinst)
+ self.assertEqual(result, 'foo')
+
+ def test_bad_method(self):
+ request = webob.Request.blank('')
+ request.method = 'PUT'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ password.handle_password, request, self.mdinst)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def _try_set_password(self, get_by_uuid, val='bar'):
+ request = webob.Request.blank('')
+ request.method = 'POST'
+ request.body = val
+ get_by_uuid.return_value = self.instance
+
+ with mock.patch.object(self.instance, 'save') as save:
+ password.handle_password(request, self.mdinst)
+ save.assert_called_once_with()
+
+ self.assertIn('password_0', self.instance.system_metadata)
+
+ def test_set_password(self):
+ self.mdinst.password = ''
+ self._try_set_password()
+
+ def test_conflict(self):
+ self.mdinst.password = 'foo'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._try_set_password)
+
+ def test_too_large(self):
+ self.mdinst.password = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._try_set_password,
+ val=('a' * (password.MAX_SIZE + 1)))
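
The neutron-proxy cases above all hinge on one check: the proxy signs X-Instance-ID with the shared secret, and the handler recomputes that HMAC and compares it in constant time (via nova.utils.constant_time_compare, per the mock in test_by_instance_id_uses_constant_time_compare) before trusting the header. A compressed sketch of that verification, as an illustration rather than the handler's actual code:

import hashlib
import hmac

def instance_id_signature_ok(shared_secret, instance_id, signature):
    # Hypothetical sketch: recompute the signature the tests build with
    # hmac.new(shared_secret, instance_id, hashlib.sha256) and compare it
    # without leaking timing information.
    expected = hmac.new(shared_secret, instance_id,
                        hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)
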
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
new file mode 100644
index 0000000000..bce03da1c3
--- /dev/null
+++ b/nova/tests/unit/test_notifications.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for common notifications."""
+
+import copy
+
+import mock
+from oslo.config import cfg
+
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.network import api as network_api
+from nova import notifications
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class NotificationsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NotificationsTestCase, self).setUp()
+
+ self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
+ 1)
+
+ def fake_get_nw_info(cls, ctxt, instance):
+ self.assertTrue(ctxt.is_admin)
+ return self.net_info
+
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ network_manager='nova.network.manager.FlatManager',
+ notify_on_state_change="vm_and_task_state",
+ host='testhost')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ self.instance = self._wrapped_create()
+
+ def _wrapped_create(self, params=None):
+ instance_type = flavors.get_flavor_by_name('m1.tiny')
+ sys_meta = flavors.save_flavor_info({}, instance_type)
+ inst = {}
+ inst['image_ref'] = 1
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = instance_type['id']
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['access_ip_v4'] = '1.2.3.4'
+ inst['access_ip_v6'] = 'feed:5eed'
+ inst['display_name'] = 'test_instance'
+ inst['hostname'] = 'test_instance_hostname'
+ inst['node'] = 'test_instance_node'
+ inst['system_metadata'] = sys_meta
+ if params:
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+ def test_send_api_fault_disabled(self):
+ self.flags(notify_api_faults=False)
+ notifications.send_api_fault("http://example.com/foo", 500, None)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_api_fault(self):
+ self.flags(notify_api_faults=True)
+ exception = None
+ try:
+ # Get a real exception with a call stack.
+ raise test.TestingException("junk")
+ except test.TestingException as e:
+ exception = e
+
+ notifications.send_api_fault("http://example.com/foo", 500, exception)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ n = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(n.priority, 'ERROR')
+ self.assertEqual(n.event_type, 'api.fault')
+ self.assertEqual(n.payload['url'], 'http://example.com/foo')
+ self.assertEqual(n.payload['status'], 500)
+ self.assertIsNotNone(n.payload['exception'])
+
+ def test_notif_disabled(self):
+
+ # test disabling the state-change notifications entirely via config
+ self.flags(notify_on_state_change=None)
+
+ old = copy.copy(self.instance)
+ self.instance["vm_state"] = vm_states.ACTIVE
+
+ old_vm_state = old['vm_state']
+ new_vm_state = self.instance["vm_state"]
+ old_task_state = old['task_state']
+ new_task_state = self.instance["task_state"]
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ verify_states=True)
+
+ notifications.send_update(self.context, old, self.instance)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_task_notif(self):
+
+ # test disabling just the task state notifications via config
+ self.flags(notify_on_state_change="vm_state")
+
+ # we should not get a notification on task state change now
+ old = copy.copy(self.instance)
+ self.instance["task_state"] = task_states.SPAWNING
+
+ old_vm_state = old['vm_state']
+ new_vm_state = self.instance["vm_state"]
+ old_task_state = old['task_state']
+ new_task_state = self.instance["task_state"]
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ verify_states=True)
+
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ # ok now enable task state notifications and re-try
+ self.flags(notify_on_state_change="vm_and_task_state")
+
+ notifications.send_update(self.context, old, self.instance)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_no_notif(self):
+
+ # with verify_states=True and unchanged states, no notification is sent:
+ old_vm_state = self.instance['vm_state']
+ new_vm_state = self.instance['vm_state']
+ old_task_state = self.instance['task_state']
+ new_task_state = self.instance['task_state']
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ service="compute", host=None, verify_states=True)
+
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_on_vm_change(self):
+
+ # pretend we just transitioned to ACTIVE:
+ params = {"vm_state": vm_states.ACTIVE}
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+ self.instance['uuid'], params)
+ notifications.send_update(self.context, old_ref, new_ref)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_on_task_change(self):
+
+ # pretend we just transitioned to task SPAWNING:
+ params = {"task_state": task_states.SPAWNING}
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+ self.instance['uuid'], params)
+ notifications.send_update(self.context, old_ref, new_ref)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_no_update_with_states(self):
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ task_states.SPAWNING, verify_states=True)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_vm_update_with_states(self):
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
+ task_states.SPAWNING, verify_states=True)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+ display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
+ node = self.instance["node"]
+
+ self.assertEqual(vm_states.BUILDING, payload["old_state"])
+ self.assertEqual(vm_states.ACTIVE, payload["state"])
+ self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
+ self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+ self.assertEqual(payload["display_name"], display_name)
+ self.assertEqual(payload["hostname"], hostname)
+ self.assertEqual(payload["node"], node)
+
+ def test_task_update_with_states(self):
+ self.flags(notify_on_state_change="vm_and_task_state")
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, verify_states=True)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+ display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
+
+ self.assertEqual(vm_states.BUILDING, payload["old_state"])
+ self.assertEqual(vm_states.BUILDING, payload["state"])
+ self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
+ self.assertIsNone(payload["new_task_state"])
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+ self.assertEqual(payload["display_name"], display_name)
+ self.assertEqual(payload["hostname"], hostname)
+
+ def test_update_no_service_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # service name should default to 'compute'
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.testhost', notif.publisher_id)
+
+ def test_update_with_service_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, service="testservice")
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # publisher_id should use the explicitly passed service name
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('testservice.testhost', notif.publisher_id)
+
+ def test_update_with_host_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, host="someotherhost")
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # service name still defaults to 'compute', but the host is overridden
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.someotherhost', notif.publisher_id)
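+
+ # NOTE: taken together, the three tests above show the publisher_id
+ # format of '<service>.<host>': the service part defaults to
+ # 'compute' and the host part to the configured 'testhost' unless
+ # explicitly overridden.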
+
+ def test_payload_has_fixed_ip_labels(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("fixed_ips", info)
+ self.assertEqual(info["fixed_ips"][0]["label"], "test1")
+
+ def test_payload_has_vif_mac_address(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("fixed_ips", info)
+ self.assertEqual(self.net_info[0]['address'],
+ info["fixed_ips"][0]["vif_mac"])
+
+ def test_payload_has_cell_name_empty(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("cell_name", info)
+ self.assertIsNone(self.instance['cell_name'])
+ self.assertEqual("", info["cell_name"])
+
+ def test_payload_has_cell_name(self):
+ self.instance['cell_name'] = "cell1"
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("cell_name", info)
+ self.assertEqual("cell1", info["cell_name"])
+
+ def test_payload_has_progress_empty(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("progress", info)
+ self.assertIsNone(self.instance['progress'])
+ self.assertEqual("", info["progress"])
+
+ def test_payload_has_progress(self):
+ self.instance['progress'] = 50
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("progress", info)
+ self.assertEqual(50, info["progress"])
+
+ def test_send_access_ip_update(self):
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+
+ def test_send_name_update(self):
+ param = {"display_name": "new_display_name"}
+ new_name_inst = self._wrapped_create(params=param)
+ notifications.send_update(self.context, self.instance, new_name_inst)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ old_display_name = self.instance["display_name"]
+ new_display_name = new_name_inst["display_name"]
+
+ self.assertEqual(payload["old_display_name"], old_display_name)
+ self.assertEqual(payload["display_name"], new_display_name)
+
+ def test_send_no_state_change(self):
+ called = [False]
+
+ def sending_no_state_change(context, instance, **kwargs):
+ called[0] = True
+ self.stubs.Set(notifications, '_send_instance_update_notification',
+ sending_no_state_change)
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertTrue(called[0])
+
+ def test_fail_sending_update(self):
+ def fail_sending(context, instance, **kwargs):
+ raise Exception('failed to notify')
+ self.stubs.Set(notifications, '_send_instance_update_notification',
+ fail_sending)
+
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+
+class NotificationsFormatTestCase(test.NoDBTestCase):
+
+ def test_state_computation(self):
+ instance = {'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state}
+ states = notifications._compute_states_payload(instance)
+ self.assertEqual(mock.sentinel.vm_state, states['state'])
+ self.assertEqual(mock.sentinel.vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
+ self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
+
+ states = notifications._compute_states_payload(
+ instance,
+ old_vm_state=mock.sentinel.old_vm_state,
+ )
+ self.assertEqual(mock.sentinel.vm_state, states['state'])
+ self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
+ self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
+
+ states = notifications._compute_states_payload(
+ instance,
+ old_vm_state=mock.sentinel.old_vm_state,
+ old_task_state=mock.sentinel.old_task_state,
+ new_vm_state=mock.sentinel.new_vm_state,
+ new_task_state=mock.sentinel.new_task_state,
+ )
+
+ self.assertEqual(mock.sentinel.new_vm_state, states['state'])
+ self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.old_task_state,
+ states['old_task_state'])
+ self.assertEqual(mock.sentinel.new_task_state,
+ states['new_task_state'])
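+
+ # NOTE: taken together, the three calls above suggest the fallback
+ # behaviour of _compute_states_payload: any old_*/new_* argument
+ # that is not supplied appears to default to the instance's current
+ # vm_state/task_state, and only explicitly passed values override
+ # those defaults.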
diff --git a/nova/tests/unit/test_nova_manage.py b/nova/tests/unit/test_nova_manage.py
new file mode 100644
index 0000000000..9ffaf66e81
--- /dev/null
+++ b/nova/tests/unit/test_nova_manage.py
@@ -0,0 +1,467 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2011 Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import StringIO
+import sys
+
+import fixtures
+import mock
+
+from nova.cmd import manage
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit.objects import test_network
+
+
+class FixedIpCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(FixedIpCommandsTestCase, self).setUp()
+ db_fakes.stub_out_db_network_api(self.stubs)
+ self.commands = manage.FixedIpCommands()
+
+ def test_reserve(self):
+ self.commands.reserve('192.168.0.100')
+ address = db.fixed_ip_get_by_address(context.get_admin_context(),
+ '192.168.0.100')
+ self.assertEqual(address['reserved'], True)
+
+ def test_reserve_nonexistent_address(self):
+ self.assertEqual(2, self.commands.reserve('55.55.55.55'))
+
+ def test_unreserve(self):
+ self.commands.unreserve('192.168.0.100')
+ address = db.fixed_ip_get_by_address(context.get_admin_context(),
+ '192.168.0.100')
+ self.assertEqual(address['reserved'], False)
+
+ def test_unreserve_nonexistent_address(self):
+ self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
+
+ def test_list(self):
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout',
+ StringIO.StringIO()))
+ self.commands.list()
+ self.assertNotEqual(-1, sys.stdout.getvalue().find('192.168.0.100'))
+
+ def test_list_just_one_host(self):
+ def fake_fixed_ip_get_by_host(*args, **kwargs):
+ return [db_fakes.fixed_ip_fields]
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.db.fixed_ip_get_by_host',
+ fake_fixed_ip_get_by_host))
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout',
+ StringIO.StringIO()))
+ self.commands.list('banana')
+ self.assertNotEqual(-1, sys.stdout.getvalue().find('192.168.0.100'))
+
+
+class FloatingIpCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(FloatingIpCommandsTestCase, self).setUp()
+ db_fakes.stub_out_db_network_api(self.stubs)
+ self.commands = manage.FloatingIpCommands()
+
+ def test_address_to_hosts(self):
+ def assert_loop(result, expected):
+ for ip in result:
+ self.assertIn(str(ip), expected)
+
+ address_to_hosts = self.commands.address_to_hosts
+ # /32 and /31
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/32')
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/31')
+ # /30
+ expected = ["192.168.100.%s" % i for i in range(1, 3)]
+ result = address_to_hosts('192.168.100.0/30')
+ self.assertEqual(2, len(list(result)))
+ assert_loop(result, expected)
+ # /29
+ expected = ["192.168.100.%s" % i for i in range(1, 7)]
+ result = address_to_hosts('192.168.100.0/29')
+ self.assertEqual(6, len(list(result)))
+ assert_loop(result, expected)
+ # /28
+ expected = ["192.168.100.%s" % i for i in range(1, 15)]
+ result = address_to_hosts('192.168.100.0/28')
+ self.assertEqual(14, len(list(result)))
+ assert_loop(result, expected)
+ # /16
+ result = address_to_hosts('192.168.100.0/16')
+ self.assertEqual(65534, len(list(result)))
+ # NOTE(dripton): I don't test /13 because it makes the test take 3s.
+ # /12 gives over a million IPs, which is ridiculous.
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/12')
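+
+ # A small sanity sketch of the arithmetic behind the counts checked
+ # above: a /N IPv4 prefix spans 2 ** (32 - N) addresses, and after
+ # excluding the network and broadcast addresses 2 ** (32 - N) - 2
+ # usable hosts remain.
+ self.assertEqual(2, 2 ** (32 - 30) - 2)
+ self.assertEqual(6, 2 ** (32 - 29) - 2)
+ self.assertEqual(14, 2 ** (32 - 28) - 2)
+ self.assertEqual(65534, 2 ** (32 - 16) - 2)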
+
+
+class NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkCommandsTestCase, self).setUp()
+ self.commands = manage.NetworkCommands()
+ self.net = {'id': 0,
+ 'label': 'fake',
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::/64',
+ 'multi_host': False,
+ 'gateway_v6': 'dead:beef::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': 200,
+ 'vlan_start': 201,
+ 'vpn_public_address': '10.0.0.2',
+ 'vpn_public_port': '2222',
+ 'vpn_private_address': '192.168.0.2',
+ 'dhcp_start': '192.168.0.3',
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
+
+ def fake_network_get_by_cidr(context, cidr):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(cidr, self.fake_net['cidr'])
+ return db_fakes.FakeModel(dict(test_network.fake_network,
+ **self.fake_net))
+
+ def fake_network_get_by_uuid(context, uuid):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(uuid, self.fake_net['uuid'])
+ return db_fakes.FakeModel(dict(test_network.fake_network,
+ **self.fake_net))
+
+ def fake_network_update(context, network_id, values):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.assertEqual(values, self.fake_update_value)
+ self.fake_network_get_by_cidr = fake_network_get_by_cidr
+ self.fake_network_get_by_uuid = fake_network_get_by_uuid
+ self.fake_network_update = fake_network_update
+
+ def test_create(self):
+
+ def fake_create_networks(obj, context, **kwargs):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(kwargs['label'], 'Test')
+ self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
+ self.assertEqual(kwargs['multi_host'], False)
+ self.assertEqual(kwargs['num_networks'], 1)
+ self.assertEqual(kwargs['network_size'], 256)
+ self.assertEqual(kwargs['vlan'], 200)
+ self.assertEqual(kwargs['vlan_start'], 201)
+ self.assertEqual(kwargs['vpn_start'], 2000)
+ self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
+ self.assertEqual(kwargs['gateway'], '10.2.0.1')
+ self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
+ self.assertEqual(kwargs['bridge'], 'br200')
+ self.assertEqual(kwargs['bridge_interface'], 'eth0')
+ self.assertEqual(kwargs['dns1'], '8.8.8.8')
+ self.assertEqual(kwargs['dns2'], '8.8.4.4')
+ self.flags(network_manager='nova.network.manager.VlanManager')
+ from nova.network import manager as net_manager
+ self.stubs.Set(net_manager.VlanManager, 'create_networks',
+ fake_create_networks)
+ self.commands.create(
+ label='Test',
+ cidr='10.2.0.0/24',
+ num_networks=1,
+ network_size=256,
+ multi_host='F',
+ vlan=200,
+ vlan_start=201,
+ vpn_start=2000,
+ cidr_v6='fd00:2::/120',
+ gateway='10.2.0.1',
+ gateway_v6='fd00:2::22',
+ bridge='br200',
+ bridge_interface='eth0',
+ dns1='8.8.8.8',
+ dns2='8.8.4.4',
+ uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
+
+ def test_list(self):
+
+ def fake_network_get_all(context):
+ return [db_fakes.FakeModel(self.net)]
+ self.stubs.Set(db, 'network_get_all', fake_network_get_all)
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.list()
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
+ "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
+ "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
+ head = _fmt % {'id': _('id'),
+ 'cidr': _('IPv4'),
+ 'cidr_v6': _('IPv6'),
+ 'dhcp_start': _('start address'),
+ 'dns1': _('DNS1'),
+ 'dns2': _('DNS2'),
+ 'vlan': _('VlanID'),
+ 'project_id': _('project'),
+ 'uuid': _("uuid")}
+ body = _fmt % {'id': self.net['id'],
+ 'cidr': self.net['cidr'],
+ 'cidr_v6': self.net['cidr_v6'],
+ 'dhcp_start': self.net['dhcp_start'],
+ 'dns1': self.net['dns1'],
+ 'dns2': self.net['dns2'],
+ 'vlan': self.net['vlan'],
+ 'project_id': self.net['project_id'],
+ 'uuid': self.net['uuid']}
+ answer = '%s\n%s\n' % (head, body)
+ self.assertEqual(result, answer)
+
+ def test_delete(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_uuid',
+ self.fake_network_get_by_uuid)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(uuid=self.fake_net['uuid'])
+
+ def test_delete_by_cidr(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(fixed_range=self.fake_net['cidr'])
+
+ def _test_modify_base(self, update_value, project, host, dis_project=None,
+ dis_host=None):
+ self.fake_net = self.net
+ self.fake_update_value = update_value
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+ self.stubs.Set(db, 'network_update', self.fake_network_update)
+ self.commands.modify(self.fake_net['cidr'], project=project, host=host,
+ dis_project=dis_project, dis_host=dis_host)
+
+ def test_modify_associate(self):
+ self._test_modify_base(update_value={'project_id': 'test_project',
+ 'host': 'test_host'},
+ project='test_project', host='test_host')
+
+ def test_modify_unchanged(self):
+ self._test_modify_base(update_value={}, project=None, host=None)
+
+ def test_modify_disassociate(self):
+ self._test_modify_base(update_value={'project_id': None, 'host': None},
+ project=None, host=None, dis_project=True,
+ dis_host=True)
+
+
+class NeutronV2NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NeutronV2NetworkCommandsTestCase, self).setUp()
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ self.commands = manage.NetworkCommands()
+
+ def test_create(self):
+ self.assertEqual(2, self.commands.create())
+
+ def test_list(self):
+ self.assertEqual(2, self.commands.list())
+
+ def test_delete(self):
+ self.assertEqual(2, self.commands.delete())
+
+ def test_modify(self):
+ self.assertEqual(2, self.commands.modify('192.168.0.1'))
+
+
+class ProjectCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(ProjectCommandsTestCase, self).setUp()
+ self.commands = manage.ProjectCommands()
+
+ def test_quota(self):
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.quota(project_id='admin',
+ key='instances',
+ value='unlimited',
+ )
+
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ print_format = "%-36s %-10s" % ('instances', 'unlimited')
+ self.assertEqual((print_format in result), True)
+
+ def test_quota_update_invalid_key(self):
+ self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
+
+
+class DBCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(DBCommandsTestCase, self).setUp()
+ self.commands = manage.DbCommands()
+
+ def test_archive_deleted_rows_negative(self):
+ self.assertEqual(1, self.commands.archive_deleted_rows(-1))
+
+
+class ServiceCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(ServiceCommandsTestCase, self).setUp()
+ self.commands = manage.ServiceCommands()
+
+ def test_service_enable_invalid_params(self):
+ self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
+
+ def test_service_disable_invalid_params(self):
+ self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
+
+
+class CellCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(CellCommandsTestCase, self).setUp()
+ self.commands = manage.CellCommands()
+
+ def test_create_transport_hosts_multiple(self):
+ """Test the _create_transport_hosts method
+ when broker_hosts is set.
+ """
+ brokers = "127.0.0.1:5672,127.0.0.2:5671"
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts=brokers)
+ self.assertEqual(2, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+ self.assertEqual('127.0.0.2', thosts[1].hostname)
+ self.assertEqual(5671, thosts[1].port)
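+
+ # A rough sketch of the expected parse of the input above (a sanity
+ # check of the "host:port,host:port" format only, not of the
+ # internals of _create_transport_hosts):
+ expected = [('127.0.0.1', 5672), ('127.0.0.2', 5671)]
+ parsed = [(h.split(':')[0], int(h.split(':')[1]))
+ for h in brokers.split(',')]
+ self.assertEqual(expected, parsed)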
+
+ def test_create_transport_hosts_single(self):
+ """Test the _create_transport_hosts method when hostname is passed."""
+ thosts = self.commands._create_transport_hosts('guest', 'devstack',
+ hostname='127.0.0.1',
+ port=80)
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(80, thosts[0].port)
+
+ def test_create_transport_hosts_single_broker(self):
+ """Test the _create_transport_hosts method for single broker_hosts."""
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672')
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+
+ def test_create_transport_hosts_both(self):
+ """Test the _create_transport_hosts method when both broker_hosts
+ and hostname/port are passed.
+ """
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672',
+ hostname='127.0.0.2', port=80)
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+
+ def test_create_transport_hosts_wrong_val(self):
+ """Test the _create_transport_hosts method when broker_hosts
+ is wrongly specified.
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672,127.0.0.1')
+
+ def test_create_transport_hosts_wrong_port_val(self):
+ """Test the _create_transport_hosts method when port in
+ broker_hosts is wrongly specified.
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:')
+
+ def test_create_transport_hosts_wrong_port_arg(self):
+ """Test the _create_transport_hosts method when port
+ argument is wrongly specified.
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ hostname='127.0.0.1', port='ab')
+
+ @mock.patch.object(context, 'get_admin_context')
+ @mock.patch.object(db, 'cell_create')
+ def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
+ """Test the create function when broker_hosts is
+ passed
+ """
+ cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
+ cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
+ ctxt = mock.sentinel
+ mock_ctxt.return_value = mock.sentinel
+ self.commands.create("test",
+ broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
+ woffset=0, wscale=0,
+ username="guest", password="devstack")
+ exp_values = {'name': "test",
+ 'is_parent': False,
+ 'transport_url': cell_tp_url,
+ 'weight_offset': 0.0,
+ 'weight_scale': 0.0}
+ mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
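+
+ # Sanity sketch of how the expected transport_url above is put
+ # together from the credentials and the broker_hosts pairs (a check
+ # of the URL format only, not of the create() internals):
+ hosts = '127.0.0.1:5432,127.0.0.2:9999'.split(',')
+ built = 'fake://' + ','.join('guest:devstack@' + h
+ for h in hosts) + '/'
+ self.assertEqual(cell_tp_url, built)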
+
+ @mock.patch.object(context, 'get_admin_context')
+ @mock.patch.object(db, 'cell_create')
+ def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
+ """Test the create function when hostname and port is
+ passed
+ """
+ cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
+ ctxt = mock.sentinel
+ mock_ctxt.return_value = mock.sentinel
+ self.commands.create("test",
+ hostname='127.0.0.1', port="9999",
+ woffset=0, wscale=0,
+ username="guest", password="devstack")
+ exp_values = {'name': "test",
+ 'is_parent': False,
+ 'transport_url': cell_tp_url,
+ 'weight_offset': 0.0,
+ 'weight_scale': 0.0}
+ mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
diff --git a/nova/tests/unit/test_objectstore.py b/nova/tests/unit/test_objectstore.py
new file mode 100644
index 0000000000..a8023d5f01
--- /dev/null
+++ b/nova/tests/unit/test_objectstore.py
@@ -0,0 +1,155 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the S3 objectstore clone.
+"""
+
+import os
+import shutil
+import tempfile
+
+import boto
+from boto import exception as boto_exception
+from boto.s3 import connection as s3
+from oslo.config import cfg
+
+from nova.objectstore import s3server
+from nova import test
+from nova import wsgi
+
+CONF = cfg.CONF
+CONF.import_opt('s3_host', 'nova.image.s3')
+
+# Create a unique temporary directory. We don't delete it after the
+# tests, so the contents can be inspected afterwards. Users and/or
+# tools running the tests need to remove the test directories.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
+
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
+
+
+class S3APITestCase(test.NoDBTestCase):
+ """Test objectstore through S3 API."""
+
+ def setUp(self):
+ """Setup users, projects, and start a test server."""
+ super(S3APITestCase, self).setUp()
+ self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ s3_host='127.0.0.1')
+
+ shutil.rmtree(CONF.buckets_path)
+ os.mkdir(CONF.buckets_path)
+
+ router = s3server.S3Application(CONF.buckets_path)
+ self.server = wsgi.Server("S3 Objectstore",
+ router,
+ host=CONF.s3_host,
+ port=0)
+ self.server.start()
+
+ if not boto.config.has_section('Boto'):
+ boto.config.add_section('Boto')
+
+ boto.config.set('Boto', 'num_retries', '0')
+ conn = s3.S3Connection(aws_access_key_id='fake',
+ aws_secret_access_key='fake',
+ host=CONF.s3_host,
+ port=self.server.port,
+ is_secure=False,
+ calling_format=s3.OrdinaryCallingFormat())
+ self.conn = conn
+
+ def get_http_connection(*args):
+ """Get a new S3 connection, don't attempt to reuse connections."""
+ return self.conn.new_http_connection(*args)
+
+ self.conn.get_http_connection = get_http_connection
+
+ def _ensure_no_buckets(self, buckets): # pylint: disable=C0111
+ self.assertEqual(len(buckets), 0, "Bucket list was not empty")
+ return True
+
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111
+ self.assertEqual(len(buckets), 1,
+ "Bucket list didn't have exactly one element in it")
+ self.assertEqual(buckets[0].name, name, "Wrong name")
+ return True
+
+ def test_list_buckets(self):
+ # Make sure we are starting with no buckets.
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_create_and_delete_bucket(self):
+ # Test bucket creation and deletion.
+ bucket_name = 'testbucket'
+
+ self.conn.create_bucket(bucket_name)
+ self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
+ self.conn.delete_bucket(bucket_name)
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_create_bucket_and_key_and_delete_key_again(self):
+ # Test key operations on buckets.
+ bucket_name = 'testbucket'
+ key_name = 'somekey'
+ key_contents = 'somekey'
+
+ b = self.conn.create_bucket(bucket_name)
+ k = b.new_key(key_name)
+ k.set_contents_from_string(key_contents)
+
+ bucket = self.conn.get_bucket(bucket_name)
+
+ # make sure the contents are correct
+ key = bucket.get_key(key_name)
+ self.assertEqual(key.get_contents_as_string(), key_contents,
+ "Bad contents")
+
+ # delete the key
+ key.delete()
+
+ self._ensure_no_buckets(bucket.get_all_keys())
+
+ def test_unknown_bucket(self):
+ # NOTE(unicell): Since Boto v2.25.0, the underlying implementation
+ # of get_bucket method changed from GET to HEAD.
+ #
+ # Prior to v2.25.0, the default validate=True fetched a list of keys
+ # in the bucket and raised S3ResponseError. As a side effect of
+ # switching to a HEAD request, the get_bucket call now produces a
+ # less detailed error message.
+ #
+ # To keep the original semantics, an additional get_all_keys call is
+ # suggested by the Boto documentation. This case tests both the
+ # validate=False and validate=True paths for completeness.
+ #
+ # http://docs.pythonboto.org/en/latest/releasenotes/v2.25.0.html
+ # http://docs.pythonboto.org/en/latest/s3_tut.html#accessing-a-bucket
+ bucket_name = 'falalala'
+ self.assertRaises(boto_exception.S3ResponseError,
+ self.conn.get_bucket,
+ bucket_name)
+ bucket = self.conn.get_bucket(bucket_name, validate=False)
+ self.assertRaises(boto_exception.S3ResponseError,
+ bucket.get_all_keys,
+ maxkeys=0)
+
+ def tearDown(self):
+ """Tear down test server."""
+ self.server.stop()
+ super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/unit/test_pipelib.py b/nova/tests/unit/test_pipelib.py
new file mode 100644
index 0000000000..99d840a839
--- /dev/null
+++ b/nova/tests/unit/test_pipelib.py
@@ -0,0 +1,74 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.cloudpipe import pipelib
+from nova import context
+from nova import crypto
+from nova import test
+from nova import utils
+
+CONF = cfg.CONF
+
+
+class PipelibTest(test.TestCase):
+ def setUp(self):
+ super(PipelibTest, self).setUp()
+ self.cloudpipe = pipelib.CloudPipe()
+ self.project = "222"
+ self.user = "111"
+ self.context = context.RequestContext(self.user, self.project)
+
+ def test_get_encoded_zip(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir)
+ crypto.ensure_ca_filesystem()
+
+ ret = self.cloudpipe.get_encoded_zip(self.project)
+ self.assertTrue(ret)
+
+ def test_launch_vpn_instance(self):
+ self.stubs.Set(self.cloudpipe.compute_api,
+ "create",
+ lambda *a, **kw: (None, "r-fakeres"))
+ with utils.tempdir() as tmpdir:
+ self.flags(ca_path=tmpdir, keys_path=tmpdir)
+ crypto.ensure_ca_filesystem()
+ self.cloudpipe.launch_vpn_instance(self.context)
+
+ def test_setup_security_group(self):
+ group_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
+
+ # First attempt, the group does not exist (thus it is created)
+ res1_group = self.cloudpipe.setup_security_group(self.context)
+ self.assertEqual(res1_group, group_name)
+
+ # Second attempt, it exists in the DB
+ res2_group = self.cloudpipe.setup_security_group(self.context)
+ self.assertEqual(res1_group, res2_group)
+
+ def test_setup_key_pair(self):
+ key_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
+ with utils.tempdir() as tmpdir:
+ self.flags(keys_path=tmpdir)
+
+ # First attempt, key does not exist (thus it is generated)
+ res1_key = self.cloudpipe.setup_key_pair(self.context)
+ self.assertEqual(res1_key, key_name)
+
+ # Second attempt, it exists in the DB
+ res2_key = self.cloudpipe.setup_key_pair(self.context)
+ self.assertEqual(res2_key, res1_key)
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
new file mode 100644
index 0000000000..59663076be
--- /dev/null
+++ b/nova/tests/unit/test_policy.py
@@ -0,0 +1,231 @@
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test of Policy Engine For Nova."""
+
+import os.path
+import StringIO
+
+import mock
+import six.moves.urllib.request as urlrequest
+
+from nova import context
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit import policy_fixture
+from nova import utils
+
+
+class PolicyFileTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PolicyFileTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.target = {}
+
+ def test_modified_policy_reloads(self):
+ with utils.tempdir() as tmpdir:
+ tmpfilename = os.path.join(tmpdir, 'policy')
+
+ self.flags(policy_file=tmpfilename)
+
+ # NOTE(uni): context construction invokes a policy check to
+ # determine whether is_admin is set. As a side effect, a policy
+ # reset is needed here to flush the existing policy cache.
+ policy.reset()
+
+ action = "example:test"
+ with open(tmpfilename, "w") as policyfile:
+ policyfile.write('{"example:test": ""}')
+ policy.enforce(self.context, action, self.target)
+ with open(tmpfilename, "w") as policyfile:
+ policyfile.write('{"example:test": "!"}')
+ policy._ENFORCER.load_rules(True)
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+
+class PolicyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PolicyTestCase, self).setUp()
+ rules = {
+ "true": '@',
+ "example:allowed": '@',
+ "example:denied": "!",
+ "example:get_http": "http://www.example.com",
+ "example:my_file": "role:compute_admin or "
+ "project_id:%(project_id)s",
+ "example:early_and_fail": "! and @",
+ "example:early_or_success": "@ or !",
+ "example:lowercase_admin": "role:admin or role:sysadmin",
+ "example:uppercase_admin": "role:ADMIN or role:sysadmin",
+ }
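+ # NOTE: the rule strings above use the oslo policy syntax exercised
+ # by the tests below: '@' always allows, '!' always denies,
+ # 'role:<name>' matches a role on the request context,
+ # 'project_id:%(project_id)s' is templated against the target, and
+ # an http:// rule delegates the decision to a remote endpoint.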
+ policy.reset()
+ policy.init()
+ policy.set_rules(dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items()))
+ self.context = context.RequestContext('fake', 'fake', roles=['member'])
+ self.target = {}
+
+ def test_enforce_nonexistent_action_throws(self):
+ action = "example:noexist"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_enforce_bad_action_throws(self):
+ action = "example:denied"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_enforce_bad_action_noraise(self):
+ action = "example:denied"
+ result = policy.enforce(self.context, action, self.target, False)
+ self.assertEqual(result, False)
+
+ def test_enforce_good_action(self):
+ action = "example:allowed"
+ result = policy.enforce(self.context, action, self.target)
+ self.assertEqual(result, True)
+
+ @mock.patch.object(urlrequest, 'urlopen',
+ return_value=StringIO.StringIO("True"))
+ def test_enforce_http_true(self, mock_urlrequest):
+ action = "example:get_http"
+ target = {}
+ result = policy.enforce(self.context, action, target)
+ self.assertEqual(result, True)
+
+ @mock.patch.object(urlrequest, 'urlopen',
+ return_value=StringIO.StringIO("False"))
+ def test_enforce_http_false(self, mock_urlrequest):
+ action = "example:get_http"
+ target = {}
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, target)
+
+ def test_templatized_enforcement(self):
+ target_mine = {'project_id': 'fake'}
+ target_not_mine = {'project_id': 'another'}
+ action = "example:my_file"
+ policy.enforce(self.context, action, target_mine)
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, target_not_mine)
+
+ def test_early_AND_enforcement(self):
+ action = "example:early_and_fail"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_early_OR_enforcement(self):
+ action = "example:early_or_success"
+ policy.enforce(self.context, action, self.target)
+
+ def test_ignore_case_role_check(self):
+ lowercase_action = "example:lowercase_admin"
+ uppercase_action = "example:uppercase_admin"
+ # NOTE(dprince) we mix case in the Admin role here to ensure
+ # case is ignored
+ admin_context = context.RequestContext('admin',
+ 'fake',
+ roles=['AdMiN'])
+ policy.enforce(admin_context, lowercase_action, self.target)
+ policy.enforce(admin_context, uppercase_action, self.target)
+
+
+class DefaultPolicyTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(DefaultPolicyTestCase, self).setUp()
+
+ self.rules = {
+ "default": '',
+ "example:exist": "!",
+ }
+
+ self._set_rules('default')
+
+ self.context = context.RequestContext('fake', 'fake')
+
+ def _set_rules(self, default_rule):
+ policy.reset()
+ rules = dict((k, common_policy.parse_rule(v))
+ for k, v in self.rules.items())
+ policy.init(rules=rules, default_rule=default_rule, use_conf=False)
+
+ def test_policy_called(self):
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, "example:exist", {})
+
+ def test_not_found_policy_calls_default(self):
+ policy.enforce(self.context, "example:noexist", {})
+
+ def test_default_not_found(self):
+ self._set_rules("default_noexist")
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, "example:noexist", {})
+
+
+class IsAdminCheckTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(IsAdminCheckTestCase, self).setUp()
+ policy.init()
+
+ def test_init_true(self):
+ check = policy.IsAdminCheck('is_admin', 'True')
+
+ self.assertEqual(check.kind, 'is_admin')
+ self.assertEqual(check.match, 'True')
+ self.assertEqual(check.expected, True)
+
+ def test_init_false(self):
+ check = policy.IsAdminCheck('is_admin', 'nottrue')
+
+ self.assertEqual(check.kind, 'is_admin')
+ self.assertEqual(check.match, 'False')
+ self.assertEqual(check.expected, False)
+
+ def test_call_true(self):
+ check = policy.IsAdminCheck('is_admin', 'True')
+
+ self.assertEqual(check('target', dict(is_admin=True),
+ policy._ENFORCER), True)
+ self.assertEqual(check('target', dict(is_admin=False),
+ policy._ENFORCER), False)
+
+ def test_call_false(self):
+ check = policy.IsAdminCheck('is_admin', 'False')
+
+ self.assertEqual(check('target', dict(is_admin=True),
+ policy._ENFORCER), False)
+ self.assertEqual(check('target', dict(is_admin=False),
+ policy._ENFORCER), True)
+
+
+class AdminRolePolicyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(AdminRolePolicyTestCase, self).setUp()
+ self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
+ self.context = context.RequestContext('fake', 'fake', roles=['member'])
+ self.actions = policy.get_rules().keys()
+ self.target = {}
+
+ def test_enforce_admin_actions_with_nonadmin_context_throws(self):
+ """Check if non-admin context passed to admin actions throws
+ Policy not authorized exception
+ """
+ for action in self.actions:
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
new file mode 100644
index 0000000000..9152f09a57
--- /dev/null
+++ b/nova/tests/unit/test_quota.py
@@ -0,0 +1,2765 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import compute
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as sqa_api
+from nova.db.sqlalchemy import models as sqa_models
+from nova import exception
+from nova import quota
+from nova import test
+import nova.tests.unit.image.fake
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class QuotaIntegrationTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(QuotaIntegrationTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ quota_instances=2,
+ quota_cores=4,
+ quota_floating_ips=1,
+ network_manager='nova.network.manager.FlatDHCPManager')
+
+ # Apparently needed by the RPC tests...
+ self.network = self.start_service('network')
+
+ self.user_id = 'admin'
+ self.project_id = 'admin'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+
+ self.compute_api = compute.API()
+
+ def tearDown(self):
+ super(QuotaIntegrationTestCase, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def _create_instance(self, cores=2):
+ """Create a test instance."""
+ inst = {}
+ inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = '3' # m1.large
+ inst['vcpus'] = cores
+ return db.instance_create(self.context, inst)
+
+ def test_too_many_instances(self):
+ instance_uuids = []
+ for i in range(CONF.quota_instances):
+ instance = self._create_instance()
+ instance_uuids.append(instance['uuid'])
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ try:
+ self.compute_api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid)
+ except exception.QuotaError as e:
+ expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
+ 'used': 4, 'allowed': 4, 'overs': 'cores,instances'}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected QuotaError exception')
+ for instance_uuid in instance_uuids:
+ db.instance_destroy(self.context, instance_uuid)
+
+ def test_too_many_cores(self):
+ instance = self._create_instance(cores=4)
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ try:
+ self.compute_api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid)
+ except exception.QuotaError as e:
+ expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
+ 'used': 4, 'allowed': 4, 'overs': 'cores'}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected QuotaError exception')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_many_cores_with_unlimited_quota(self):
+ # Setting cores quota to unlimited:
+ self.flags(quota_cores=-1)
+ instance = self._create_instance(cores=4)
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_too_many_addresses(self):
+ address = '192.168.0.100'
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'project_id': self.project_id})
+ self.assertRaises(exception.QuotaError,
+ self.network.allocate_floating_ip,
+ self.context,
+ self.project_id)
+ db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_auto_assigned(self):
+ address = '192.168.0.100'
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'project_id': self.project_id})
+ # auto allocated addresses should not be counted
+ self.assertRaises(exception.NoMoreFloatingIps,
+ self.network.allocate_floating_ip,
+ self.context,
+ self.project_id,
+ True)
+ db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_too_many_metadata_items(self):
+ metadata = {}
+ for i in range(CONF.quota_metadata_items + 1):
+ metadata['key%s' % i] = 'value%s' % i
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ self.assertRaises(exception.QuotaError, self.compute_api.create,
+ self.context,
+ min_count=1,
+ max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid,
+ metadata=metadata)
+
+ def _create_with_injected_files(self, files):
+ api = self.compute_api
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type, image_href=image_uuid,
+ injected_files=files)
+
+ def test_no_injected_files(self):
+ api = self.compute_api
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ api.create(self.context,
+ instance_type=inst_type,
+ image_href=image_uuid)
+
+ def test_max_injected_files(self):
+ files = []
+ for i in xrange(CONF.quota_injected_files):
+ files.append(('/my/path%d' % i, 'config = test\n'))
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_files(self):
+ files = []
+ for i in xrange(CONF.quota_injected_files + 1):
+ files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_max_injected_file_content_bytes(self):
+ max = CONF.quota_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max)])
+ files = [('/test/path', content)]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_content_bytes(self):
+ max = CONF.quota_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max + 1)])
+ files = [('/test/path', content)]
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_max_injected_file_path_bytes(self):
+ max = CONF.quota_injected_file_path_length
+ path = ''.join(['a' for i in xrange(max)])
+ files = [(path, 'config = quotatest')]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_path_bytes(self):
+ max = CONF.quota_injected_file_path_length
+ path = ''.join(['a' for i in xrange(max + 1)])
+ files = [(path, 'config = quotatest')]
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_reservation_expire(self):
+ self.useFixture(test.TimeOverride())
+
+ def assertInstancesReserved(reserved):
+ result = quota.QUOTAS.get_project_quotas(self.context,
+ self.context.project_id)
+ self.assertEqual(result['instances']['reserved'], reserved)
+
+ quota.QUOTAS.reserve(self.context,
+ expire=60,
+ instances=2)
+
+ assertInstancesReserved(2)
+
+ timeutils.advance_time_seconds(80)
+
+ quota.QUOTAS.expire(self.context)
+
+ assertInstancesReserved(0)
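+
+ # NOTE: the reservation above carries a 60 second expiry; once the
+ # overridden clock has advanced past it, QUOTAS.expire() rolls the
+ # reservation back, which is why the reserved count returns to 0.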
+
+
+class FakeContext(object):
+ def __init__(self, project_id, quota_class):
+ self.is_admin = False
+ self.user_id = 'fake_user'
+ self.project_id = project_id
+ self.quota_class = quota_class
+ self.read_deleted = 'no'
+
+ def elevated(self):
+ elevated = self.__class__(self.project_id, self.quota_class)
+ elevated.is_admin = True
+ return elevated
+
+
+class FakeDriver(object):
+ def __init__(self, by_project=None, by_user=None, by_class=None,
+ reservations=None):
+ self.called = []
+ self.by_project = by_project or {}
+ self.by_user = by_user or {}
+ self.by_class = by_class or {}
+ self.reservations = reservations or []
+
+ def get_by_project_and_user(self, context, project_id, user_id, resource):
+ self.called.append(('get_by_project_and_user',
+ context, project_id, user_id, resource))
+ try:
+ return self.by_user[user_id][resource]
+ except KeyError:
+ raise exception.ProjectUserQuotaNotFound(project_id=project_id,
+ user_id=user_id)
+
+ def get_by_project(self, context, project_id, resource):
+ self.called.append(('get_by_project', context, project_id, resource))
+ try:
+ return self.by_project[project_id][resource]
+ except KeyError:
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
+
+ def get_by_class(self, context, quota_class, resource):
+ self.called.append(('get_by_class', context, quota_class, resource))
+ try:
+ return self.by_class[quota_class][resource]
+ except KeyError:
+ raise exception.QuotaClassNotFound(class_name=quota_class)
+
+ def get_defaults(self, context, resources):
+ self.called.append(('get_defaults', context, resources))
+ return resources
+
+ def get_class_quotas(self, context, resources, quota_class,
+ defaults=True):
+ self.called.append(('get_class_quotas', context, resources,
+ quota_class, defaults))
+ return resources
+
+ def get_user_quotas(self, context, resources, project_id, user_id,
+ quota_class=None, defaults=True, usages=True):
+ self.called.append(('get_user_quotas', context, resources,
+ project_id, user_id, quota_class, defaults,
+ usages))
+ return resources
+
+ def get_project_quotas(self, context, resources, project_id,
+ quota_class=None, defaults=True, usages=True,
+ remains=False):
+ self.called.append(('get_project_quotas', context, resources,
+ project_id, quota_class, defaults, usages,
+ remains))
+ return resources
+
+ def limit_check(self, context, resources, values, project_id=None,
+ user_id=None):
+ self.called.append(('limit_check', context, resources,
+ values, project_id, user_id))
+
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None, user_id=None):
+ self.called.append(('reserve', context, resources, deltas,
+ expire, project_id, user_id))
+ return self.reservations
+
+ def commit(self, context, reservations, project_id=None, user_id=None):
+ self.called.append(('commit', context, reservations, project_id,
+ user_id))
+
+ def rollback(self, context, reservations, project_id=None, user_id=None):
+ self.called.append(('rollback', context, reservations, project_id,
+ user_id))
+
+ def usage_reset(self, context, resources):
+ self.called.append(('usage_reset', context, resources))
+
+ def destroy_all_by_project_and_user(self, context, project_id, user_id):
+ self.called.append(('destroy_all_by_project_and_user', context,
+ project_id, user_id))
+
+ def destroy_all_by_project(self, context, project_id):
+ self.called.append(('destroy_all_by_project', context, project_id))
+
+ def expire(self, context):
+ self.called.append(('expire', context))
+
+
+class BaseResourceTestCase(test.TestCase):
+ def test_no_flag(self):
+ resource = quota.BaseResource('test_resource')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertIsNone(resource.flag)
+ self.assertEqual(resource.default, -1)
+
+ def test_with_flag(self):
+ # We know this flag exists, so use it...
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertEqual(resource.flag, 'quota_instances')
+ self.assertEqual(resource.default, 10)
+
+ def test_with_flag_no_quota(self):
+ self.flags(quota_instances=-1)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertEqual(resource.flag, 'quota_instances')
+ self.assertEqual(resource.default, -1)
+
+ def test_quota_no_project_no_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver()
+ context = FakeContext(None, None)
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 10)
+
+ def test_quota_with_project_no_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ ))
+ context = FakeContext('test_project', None)
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 15)
+
+ def test_quota_no_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=20),
+ ))
+ context = FakeContext(None, 'test_class')
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 20)
+
+ def test_quota_with_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ ),
+ by_class=dict(
+ test_class=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 15)
+
+ def test_quota_override_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ override_project=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context,
+ project_id='override_project')
+
+ self.assertEqual(quota_value, 20)
+
+ def test_quota_with_project_override_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=15),
+ override_class=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context,
+ quota_class='override_class')
+
+ self.assertEqual(quota_value, 20)
+
+ def test_valid_method_call_check_invalid_input(self):
+ resources = {'dummy': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'limit')
+
+ def test_valid_method_call_check_invalid_method(self):
+ resources = {'key_pairs': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'dummy')
+
+ def test_valid_method_call_check_multiple(self):
+ resources = {'key_pairs': 1, 'dummy': 2}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+ resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+ def test_valid_method_call_check_wrong_method_reserve(self):
+ resources = {'key_pairs': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'reserve')
+
+ def test_valid_method_call_check_wrong_method_check(self):
+ resources = {'fixed_ips': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+
+class QuotaEngineTestCase(test.TestCase):
+ def test_init(self):
+ quota_obj = quota.QuotaEngine()
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
+ def test_init_override_string(self):
+ quota_obj = quota.QuotaEngine(
+ quota_driver_class='nova.tests.unit.test_quota.FakeDriver')
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertIsInstance(quota_obj._driver, FakeDriver)
+
+ def test_init_override_obj(self):
+ quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertEqual(quota_obj._driver, FakeDriver)
+
+ def test_register_resource(self):
+ quota_obj = quota.QuotaEngine()
+ resource = quota.AbsoluteResource('test_resource')
+ quota_obj.register_resource(resource)
+
+ self.assertEqual(quota_obj._resources, dict(test_resource=resource))
+
+ def test_register_resources(self):
+ quota_obj = quota.QuotaEngine()
+ resources = [
+ quota.AbsoluteResource('test_resource1'),
+ quota.AbsoluteResource('test_resource2'),
+ quota.AbsoluteResource('test_resource3'),
+ ]
+ quota_obj.register_resources(resources)
+
+ self.assertEqual(quota_obj._resources, dict(
+ test_resource1=resources[0],
+ test_resource2=resources[1],
+ test_resource3=resources[2],
+ ))
+
+ def test_get_by_project_and_user(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_user=dict(
+ fake_user=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_project_and_user(context, 'test_project',
+ 'fake_user', 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_project_and_user', context, 'test_project',
+ 'fake_user', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def test_get_by_project(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_project(context, 'test_project',
+ 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_project', context, 'test_project', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def test_get_by_class(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_class', context, 'test_class', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def _make_quota_obj(self, driver):
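+ # Resources are registered in reverse name order on purpose;
+ # test_resources below checks that QuotaEngine.resources returns
+ # the names sorted.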
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ resources = [
+ quota.AbsoluteResource('test_resource4'),
+ quota.AbsoluteResource('test_resource3'),
+ quota.AbsoluteResource('test_resource2'),
+ quota.AbsoluteResource('test_resource1'),
+ ]
+ quota_obj.register_resources(resources)
+
+ return quota_obj
+
+ def test_get_defaults(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result = quota_obj.get_defaults(context)
+
+ self.assertEqual(driver.called, [
+ ('get_defaults', context, quota_obj._resources),
+ ])
+ self.assertEqual(result, quota_obj._resources)
+
+ def test_get_class_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_class_quotas(context, 'test_class')
+ result2 = quota_obj.get_class_quotas(context, 'test_class', False)
+
+ self.assertEqual(driver.called, [
+ ('get_class_quotas', context, quota_obj._resources,
+ 'test_class', True),
+ ('get_class_quotas', context, quota_obj._resources,
+ 'test_class', False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_get_user_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_user_quotas(context, 'test_project',
+ 'fake_user')
+ result2 = quota_obj.get_user_quotas(context, 'test_project',
+ 'fake_user',
+ quota_class='test_class',
+ defaults=False,
+ usages=False)
+
+ self.assertEqual(driver.called, [
+ ('get_user_quotas', context, quota_obj._resources,
+ 'test_project', 'fake_user', None, True, True),
+ ('get_user_quotas', context, quota_obj._resources,
+ 'test_project', 'fake_user', 'test_class', False, False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_get_project_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_project_quotas(context, 'test_project')
+ result2 = quota_obj.get_project_quotas(context, 'test_project',
+ quota_class='test_class',
+ defaults=False,
+ usages=False)
+
+ self.assertEqual(driver.called, [
+ ('get_project_quotas', context, quota_obj._resources,
+ 'test_project', None, True, True, False),
+ ('get_project_quotas', context, quota_obj._resources,
+ 'test_project', 'test_class', False, False, False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_count_no_resource(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ self.assertRaises(exception.QuotaResourceUnknown,
+ quota_obj.count, context, 'test_resource5',
+ True, foo='bar')
+
+ def test_count_wrong_resource(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ self.assertRaises(exception.QuotaResourceUnknown,
+ quota_obj.count, context, 'test_resource1',
+ True, foo='bar')
+
+ def test_count(self):
+ def fake_count(context, *args, **kwargs):
+ self.assertEqual(args, (True,))
+ self.assertEqual(kwargs, dict(foo='bar'))
+ return 5
+
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.register_resource(quota.CountableResource('test_resource5',
+ fake_count))
+ result = quota_obj.count(context, 'test_resource5', True, foo='bar')
+
+ self.assertEqual(result, 5)
+
+ def test_limit_check(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
+ test_resource3=2, test_resource4=1)
+
+ self.assertEqual(driver.called, [
+ ('limit_check', context, quota_obj._resources, dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1,
+ ), None, None),
+ ])
+
+ def test_reserve(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver(reservations=[
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.reserve(context, test_resource1=4,
+ test_resource2=3, test_resource3=2,
+ test_resource4=1)
+ result2 = quota_obj.reserve(context, expire=3600,
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
+ result3 = quota_obj.reserve(context, project_id='fake_project',
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
+
+ self.assertEqual(driver.called, [
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1,
+ ), None, None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), 3600, None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), None, 'fake_project', None),
+ ])
+ self.assertEqual(result1, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ self.assertEqual(result2, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ self.assertEqual(result3, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+
+ def test_commit(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
+
+ self.assertEqual(driver.called, [
+ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
+ None),
+ ])
+
+ def test_rollback(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
+
+ self.assertEqual(driver.called, [
+ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
+ None),
+ ])
+
+ def test_usage_reset(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
+
+ self.assertEqual(driver.called, [
+ ('usage_reset', context, ['res1', 'res2', 'res3']),
+ ])
+
+ def test_destroy_all_by_project_and_user(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.destroy_all_by_project_and_user(context,
+ 'test_project', 'fake_user')
+
+ self.assertEqual(driver.called, [
+ ('destroy_all_by_project_and_user', context, 'test_project',
+ 'fake_user'),
+ ])
+
+ def test_destroy_all_by_project(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.destroy_all_by_project(context, 'test_project')
+
+ self.assertEqual(driver.called, [
+ ('destroy_all_by_project', context, 'test_project'),
+ ])
+
+ def test_expire(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.expire(context)
+
+ self.assertEqual(driver.called, [
+ ('expire', context),
+ ])
+
+ def test_resources(self):
+ quota_obj = self._make_quota_obj(None)
+
+ self.assertEqual(quota_obj.resources,
+ ['test_resource1', 'test_resource2',
+ 'test_resource3', 'test_resource4'])
+
+
+class DbQuotaDriverTestCase(test.TestCase):
+ def setUp(self):
+ super(DbQuotaDriverTestCase, self).setUp()
+
+ self.flags(quota_instances=10,
+ quota_cores=20,
+ quota_ram=50 * 1024,
+ quota_floating_ips=10,
+ quota_fixed_ips=10,
+ quota_metadata_items=128,
+ quota_injected_files=5,
+ quota_injected_file_content_bytes=10 * 1024,
+ quota_injected_file_path_length=255,
+ quota_security_groups=10,
+ quota_security_group_rules=20,
+ quota_server_groups=10,
+ quota_server_group_members=10,
+ reservation_expire=86400,
+ until_refresh=0,
+ max_age=0,
+ )
+
+ self.driver = quota.DbQuotaDriver()
+
+ self.calls = []
+
+ self.useFixture(test.TimeOverride())
+
+ def test_get_defaults(self):
+ # Use our pre-defined resources
+ self._stub_quota_class_get_default()
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+
+ self.assertEqual(result, dict(
+ instances=5,
+ cores=20,
+ ram=25 * 1024,
+ floating_ips=10,
+ fixed_ips=10,
+ metadata_items=64,
+ injected_files=5,
+ injected_file_content_bytes=5 * 1024,
+ injected_file_path_bytes=255,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ server_groups=10,
+ server_group_members=10,
+ ))
+
+ def _stub_quota_class_get_default(self):
+ # Stub out quota_class_get_default
+ def fake_qcgd(context):
+ self.calls.append('quota_class_get_default')
+ return dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ )
+ self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
+
+ def _stub_quota_class_get_all_by_name(self):
+ # Stub out quota_class_get_all_by_name
+ def fake_qcgabn(context, quota_class):
+ self.calls.append('quota_class_get_all_by_name')
+ self.assertEqual(quota_class, 'test_class')
+ return dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ )
+ self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
+
+ def test_get_class_quotas(self):
+ self._stub_quota_class_get_all_by_name()
+ result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
+ 'test_class')
+
+ self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
+ self.assertEqual(result, dict(
+ instances=5,
+ cores=20,
+ ram=25 * 1024,
+ floating_ips=10,
+ fixed_ips=10,
+ metadata_items=64,
+ injected_files=5,
+ injected_file_content_bytes=5 * 1024,
+ injected_file_path_bytes=255,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ server_groups=10,
+ server_group_members=10,
+ ))
+
+ def test_get_class_quotas_no_defaults(self):
+ self._stub_quota_class_get_all_by_name()
+ result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
+ 'test_class', False)
+
+ self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
+ self.assertEqual(result, dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ ))
+
+ def _stub_get_by_project_and_user(self):
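+ # Stub out quota and usage lookups by project and by project/user.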
+ def fake_qgabpau(context, project_id, user_id):
+ self.calls.append('quota_get_all_by_project_and_user')
+ self.assertEqual(project_id, 'test_project')
+ self.assertEqual(user_id, 'fake_user')
+ return dict(
+ cores=10,
+ injected_files=2,
+ injected_file_path_bytes=127,
+ )
+
+ def fake_qgabp(context, project_id):
+ self.calls.append('quota_get_all_by_project')
+ self.assertEqual(project_id, 'test_project')
+ return {
+ 'cores': 10,
+ 'injected_files': 2,
+ 'injected_file_path_bytes': 127,
+ }
+
+ def fake_qugabpau(context, project_id, user_id):
+ self.calls.append('quota_usage_get_all_by_project_and_user')
+ self.assertEqual(project_id, 'test_project')
+ self.assertEqual(user_id, 'fake_user')
+ return dict(
+ instances=dict(in_use=2, reserved=2),
+ cores=dict(in_use=4, reserved=4),
+ ram=dict(in_use=10 * 1024, reserved=0),
+ floating_ips=dict(in_use=2, reserved=0),
+ metadata_items=dict(in_use=0, reserved=0),
+ injected_files=dict(in_use=0, reserved=0),
+ injected_file_content_bytes=dict(in_use=0, reserved=0),
+ injected_file_path_bytes=dict(in_use=0, reserved=0),
+ )
+
+ self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
+ self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
+ self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
+ fake_qugabpau)
+
+ self._stub_quota_class_get_all_by_name()
+
+ def test_get_user_quotas(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def _stub_get_by_project_and_user_specific(self):
+ def fake_quota_get(context, project_id, resource, user_id=None):
+ self.calls.append('quota_get')
+ self.assertEqual(project_id, 'test_project')
+ self.assertEqual(user_id, 'fake_user')
+ self.assertEqual(resource, 'test_resource')
+ return dict(
+ test_resource=dict(in_use=20, reserved=10),
+ )
+ self.stubs.Set(db, 'quota_get', fake_quota_get)
+
+ def test_get_by_project_and_user(self):
+ self._stub_get_by_project_and_user_specific()
+ result = self.driver.get_by_project_and_user(
+ FakeContext('test_project', 'test_class'),
+ 'test_project', 'fake_user', 'test_resource')
+
+ self.assertEqual(self.calls, ['quota_get'])
+ self.assertEqual(result, dict(
+ test_resource=dict(in_use=20, reserved=10),
+ ))
+
+ def _stub_get_by_project(self):
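+ # Stub out quota and usage lookups by project.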
+ def fake_qgabp(context, project_id):
+ self.calls.append('quota_get_all_by_project')
+ self.assertEqual(project_id, 'test_project')
+ return dict(
+ cores=10,
+ injected_files=2,
+ injected_file_path_bytes=127,
+ )
+
+ def fake_qugabp(context, project_id):
+ self.calls.append('quota_usage_get_all_by_project')
+ self.assertEqual(project_id, 'test_project')
+ return dict(
+ instances=dict(in_use=2, reserved=2),
+ cores=dict(in_use=4, reserved=4),
+ ram=dict(in_use=10 * 1024, reserved=0),
+ floating_ips=dict(in_use=2, reserved=0),
+ metadata_items=dict(in_use=0, reserved=0),
+ injected_files=dict(in_use=0, reserved=0),
+ injected_file_content_bytes=dict(in_use=0, reserved=0),
+ injected_file_path_bytes=dict(in_use=0, reserved=0),
+ )
+
+ self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
+ self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
+
+ self._stub_quota_class_get_all_by_name()
+ self._stub_quota_class_get_default()
+
+ def test_get_project_quotas(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_alt_context_no_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', None),
+ quota.QUOTAS._resources, 'test_project', 'fake_user')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=10,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=50 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=128,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=10 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_alt_context_no_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('other_project', 'other_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_alt_context_with_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user',
+ quota_class='test_class')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_alt_context_with_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('other_project', 'other_class'),
+ quota.QUOTAS._resources, 'test_project', quota_class='test_class')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_no_defaults(self):
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user',
+ defaults=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_no_defaults(self):
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', defaults=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_no_usages(self):
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ ),
+ cores=dict(
+ limit=10,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ ),
+ floating_ips=dict(
+ limit=10,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ ),
+ metadata_items=dict(
+ limit=64,
+ ),
+ injected_files=dict(
+ limit=2,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ ),
+ security_groups=dict(
+ limit=10,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ ),
+ key_pairs=dict(
+ limit=100,
+ ),
+ server_groups=dict(
+ limit=10,
+ ),
+ server_group_members=dict(
+ limit=10,
+ ),
+ ))
+
+ def test_get_project_quotas_no_usages(self):
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', usages=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ ),
+ cores=dict(
+ limit=10,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ ),
+ floating_ips=dict(
+ limit=10,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ ),
+ metadata_items=dict(
+ limit=64,
+ ),
+ injected_files=dict(
+ limit=2,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ ),
+ security_groups=dict(
+ limit=10,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ ),
+ key_pairs=dict(
+ limit=100,
+ ),
+ server_groups=dict(
+ limit=10,
+ ),
+ server_group_members=dict(
+ limit=10,
+ ),
+ ))
+
+ def _stub_get_settable_quotas(self):
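+ # Stub out the driver's get_project_quotas/get_user_quotas and the
+ # per-user quota lookup; the fakes report 'cores' as unlimited (-1)
+ # and 'instances' as partially consumed.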
+ def fake_get_project_quotas(context, resources, project_id,
+ quota_class=None, defaults=True,
+ usages=True, remains=False,
+ project_quotas=None):
+ self.calls.append('get_project_quotas')
+ result = {}
+ for k, v in resources.items():
+ limit = v.default
+ reserved = 0
+ if k == 'instances':
+ remains = v.default - 5
+ in_use = 1
+ elif k == 'cores':
+ remains = -1
+ in_use = 5
+ limit = -1
+ else:
+ remains = v.default
+ in_use = 0
+ result[k] = {'limit': limit, 'in_use': in_use,
+ 'reserved': reserved, 'remains': remains}
+ return result
+
+ def fake_get_user_quotas(context, resources, project_id, user_id,
+ quota_class=None, defaults=True,
+ usages=True, project_quotas=None,
+ user_quotas=None):
+ self.calls.append('get_user_quotas')
+ result = {}
+ for k, v in resources.items():
+ reserved = 0
+ if k == 'instances':
+ in_use = 1
+ elif k == 'cores':
+ in_use = 5
+ reserved = 10
+ else:
+ in_use = 0
+ result[k] = {'limit': v.default,
+ 'in_use': in_use, 'reserved': reserved}
+ return result
+
+ def fake_qgabpau(context, project_id, user_id):
+ self.calls.append('quota_get_all_by_project_and_user')
+ return {'instances': 2, 'cores': -1}
+
+ self.stubs.Set(self.driver, 'get_project_quotas',
+ fake_get_project_quotas)
+ self.stubs.Set(self.driver, 'get_user_quotas',
+ fake_get_user_quotas)
+ self.stubs.Set(db, 'quota_get_all_by_project_and_user',
+ fake_qgabpau)
+
+ def test_get_settable_quotas_with_user(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', user_id='test_user')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ 'quota_get_all_by_project_and_user',
+ 'get_user_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 1,
+ 'maximum': 7,
+ },
+ 'cores': {
+ 'minimum': 15,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': 50 * 1024,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': 128,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': 5,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': 10 * 1024,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': 255,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': 20,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': 100,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ })
+
+ def test_get_settable_quotas_without_user(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 5,
+ 'maximum': -1,
+ },
+ 'cores': {
+ 'minimum': 5,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ })
+
+ def test_get_settable_quotas_by_user_with_unlimited_value(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', user_id='test_user')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ 'quota_get_all_by_project_and_user',
+ 'get_user_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 1,
+ 'maximum': 7,
+ },
+ 'cores': {
+ 'minimum': 15,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': 50 * 1024,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': 128,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': 5,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': 10 * 1024,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': 255,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': 20,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': 100,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ })
+
+ def _stub_get_project_quotas(self):
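+ # Stub out get_project_quotas to return just the default limit for
+ # every resource.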
+ def fake_get_project_quotas(context, resources, project_id,
+ quota_class=None, defaults=True,
+ usages=True, remains=False,
+ project_quotas=None):
+ self.calls.append('get_project_quotas')
+ return dict((k, dict(limit=v.default))
+ for k, v in resources.items())
+
+ self.stubs.Set(self.driver, 'get_project_quotas',
+ fake_get_project_quotas)
+
+ def test_get_quotas_has_sync_unknown(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['unknown'], True)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_no_sync_unknown(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['unknown'], False)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_has_sync_no_sync_resource(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['metadata_items'], True)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_no_sync_has_sync_resource(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['instances'], False)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_has_sync(self):
+ self._stub_get_project_quotas()
+ result = self.driver._get_quotas(FakeContext('test_project',
+ 'test_class'),
+ quota.QUOTAS._resources,
+ ['instances', 'cores', 'ram',
+ 'floating_ips', 'security_groups',
+ 'server_groups'],
+ True,
+ project_id='test_project')
+
+ self.assertEqual(self.calls, ['get_project_quotas'])
+ self.assertEqual(result, dict(
+ instances=10,
+ cores=20,
+ ram=50 * 1024,
+ floating_ips=10,
+ security_groups=10,
+ server_groups=10,
+ ))
+
+ def test_get_quotas_no_sync(self):
+ self._stub_get_project_quotas()
+ result = self.driver._get_quotas(FakeContext('test_project',
+ 'test_class'),
+ quota.QUOTAS._resources,
+ ['metadata_items', 'injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'security_group_rules',
+ 'server_group_members'], False,
+ project_id='test_project')
+
+ self.assertEqual(self.calls, ['get_project_quotas'])
+ self.assertEqual(result, dict(
+ metadata_items=128,
+ injected_files=5,
+ injected_file_content_bytes=10 * 1024,
+ injected_file_path_bytes=255,
+ security_group_rules=20,
+ server_group_members=10,
+ ))
+
+ def test_limit_check_under(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.InvalidQuotaValue,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=-1))
+
+ def test_limit_check_over(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.OverQuota,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=129))
+
+ def test_limit_check_project_overs(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.OverQuota,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(injected_file_content_bytes=10241,
+ injected_file_path_bytes=256))
+
+ def test_limit_check_unlimited(self):
+ self.flags(quota_metadata_items=-1)
+ self._stub_get_project_quotas()
+ self.driver.limit_check(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=32767))
+
+ def test_limit_check(self):
+ self._stub_get_project_quotas()
+ self.driver.limit_check(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=128))
+
+ def _stub_quota_reserve(self):
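+ # Stub out db.quota_reserve, recording the expire, until_refresh and
+ # max_age values it was called with.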
+ def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
+ expire, until_refresh, max_age, project_id=None,
+ user_id=None):
+ self.calls.append(('quota_reserve', expire, until_refresh,
+ max_age))
+ return ['resv-1', 'resv-2', 'resv-3']
+ self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
+
+ def test_reserve_bad_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.assertRaises(exception.InvalidReservationExpiration,
+ self.driver.reserve,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire='invalid')
+ self.assertEqual(self.calls, [])
+
+ def test_reserve_default_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2))
+
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_int_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=3600)
+
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_timedelta_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ expire_delta = datetime.timedelta(seconds=60)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire_delta)
+
+ expire = timeutils.utcnow() + expire_delta
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_datetime_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_until_refresh(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.flags(until_refresh=500)
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 500, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_max_age(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.flags(max_age=86400)
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 86400),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_usage_reset(self):
+ calls = []
+
+ def fake_quota_usage_update(context, project_id, user_id, resource,
+ **kwargs):
+ calls.append(('quota_usage_update', context, project_id, user_id,
+ resource, kwargs))
+ if resource == 'nonexist':
+ raise exception.QuotaUsageNotFound(project_id=project_id)
+ self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
+
+ ctx = FakeContext('test_project', 'test_class')
+ resources = ['res1', 'res2', 'nonexist', 'res4']
+ self.driver.usage_reset(ctx, resources)
+
+ # Make sure we got exactly one call per resource
+ self.assertEqual(len(calls), len(resources))
+
+ # Extract the elevated context that was used and do some
+ # sanity checks
+ elevated = calls[0][1]
+ self.assertEqual(elevated.project_id, ctx.project_id)
+ self.assertEqual(elevated.quota_class, ctx.quota_class)
+ self.assertEqual(elevated.is_admin, True)
+
+ # Now check that all the expected calls were made
+ exemplar = [('quota_usage_update', elevated, 'test_project',
+ 'fake_user', res, dict(in_use=-1)) for res in resources]
+ self.assertEqual(calls, exemplar)
+
+
+class FakeSession(object):
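+ # Minimal stand-in for a SQLAlchemy session: begin() hands back the
+ # session itself, which acts as a no-op context manager, and add()
+ # discards the instance.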
+ def begin(self):
+ return self
+
+ def add(self, instance):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ return False
+
+
+class FakeUsage(sqa_models.QuotaUsage):
+ def save(self, *args, **kwargs):
+ pass
+
+
+class QuotaReserveSqlAlchemyTestCase(test.TestCase):
+ # nova.db.sqlalchemy.api.quota_reserve is complex enough to warrant
+ # its own test case, and since it manipulates quotas, this is the
+ # natural place for it.
+
+ def setUp(self):
+ super(QuotaReserveSqlAlchemyTestCase, self).setUp()
+ self.sync_called = set()
+ self.quotas = dict(
+ instances=5,
+ cores=10,
+ ram=10 * 1024,
+ fixed_ips=5,
+ )
+ self.deltas = dict(
+ instances=2,
+ cores=4,
+ ram=2 * 1024,
+ fixed_ips=2,
+ )
+
+ def make_sync(res_name):
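+ # Each fake sync records that it ran and returns a refreshed in_use:
+ # 2 if the stored usage went negative, otherwise one less than the
+ # stored in_use, or 0 when no usage row exists.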
+ def sync(context, project_id, user_id, session):
+ self.sync_called.add(res_name)
+ if res_name in self.usages:
+ if self.usages[res_name].in_use < 0:
+ return {res_name: 2}
+ else:
+ return {res_name: self.usages[res_name].in_use - 1}
+ return {res_name: 0}
+ return sync
+ self.resources = {}
+
+ _existing_quota_sync_func_dict = dict(sqa_api.QUOTA_SYNC_FUNCTIONS)
+
+ def restore_sync_functions():
+ sqa_api.QUOTA_SYNC_FUNCTIONS.clear()
+ sqa_api.QUOTA_SYNC_FUNCTIONS.update(_existing_quota_sync_func_dict)
+
+ self.addCleanup(restore_sync_functions)
+
+ for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
+ method_name = '_sync_%s' % res_name
+ sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
+ res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
+ self.resources[res_name] = res
+
+ self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
+ self.usages = {}
+ self.usages_created = {}
+ self.reservations_created = {}
+ self.usages_list = [
+ dict(resource='instances',
+ project_id='test_project',
+ user_id='fake_user',
+ in_use=2,
+ reserved=2,
+ until_refresh=None),
+ dict(resource='cores',
+ project_id='test_project',
+ user_id='fake_user',
+ in_use=2,
+ reserved=4,
+ until_refresh=None),
+ dict(resource='ram',
+ project_id='test_project',
+ user_id='fake_user',
+ in_use=2,
+ reserved=2 * 1024,
+ until_refresh=None),
+ dict(resource='fixed_ips',
+ project_id='test_project',
+ user_id=None,
+ in_use=2,
+ reserved=2,
+ until_refresh=None),
+ ]
+
+ def fake_get_session():
+ return FakeSession()
+
+ def fake_get_project_user_quota_usages(context, session, project_id,
+ user_id):
+ return self.usages.copy(), self.usages.copy()
+
+ def fake_quota_usage_create(project_id, user_id, resource,
+ in_use, reserved, until_refresh,
+ session=None, save=True):
+ quota_usage_ref = self._make_quota_usage(
+ project_id, user_id, resource, in_use, reserved, until_refresh,
+ timeutils.utcnow(), timeutils.utcnow())
+
+ self.usages_created[resource] = quota_usage_ref
+
+ return quota_usage_ref
+
+ def fake_reservation_create(uuid, usage_id, project_id,
+ user_id, resource, delta, expire,
+ session=None):
+ reservation_ref = self._make_reservation(
+ uuid, usage_id, project_id, user_id, resource, delta, expire,
+ timeutils.utcnow(), timeutils.utcnow())
+
+ self.reservations_created[resource] = reservation_ref
+
+ return reservation_ref
+
+ self.stubs.Set(sqa_api, 'get_session', fake_get_session)
+ self.stubs.Set(sqa_api, '_get_project_user_quota_usages',
+ fake_get_project_user_quota_usages)
+ self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
+ self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
+
+ self.useFixture(test.TimeOverride())
+
+ def _make_quota_usage(self, project_id, user_id, resource, in_use,
+ reserved, until_refresh, created_at, updated_at):
+ quota_usage_ref = FakeUsage()
+ quota_usage_ref.id = len(self.usages) + len(self.usages_created)
+ quota_usage_ref.project_id = project_id
+ quota_usage_ref.user_id = user_id
+ quota_usage_ref.resource = resource
+ quota_usage_ref.in_use = in_use
+ quota_usage_ref.reserved = reserved
+ quota_usage_ref.until_refresh = until_refresh
+ quota_usage_ref.created_at = created_at
+ quota_usage_ref.updated_at = updated_at
+ quota_usage_ref.deleted_at = None
+ quota_usage_ref.deleted = False
+
+ return quota_usage_ref
+
+ def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
+ until_refresh=None, created_at=None, updated_at=None):
+ if created_at is None:
+ created_at = timeutils.utcnow()
+ if updated_at is None:
+ updated_at = timeutils.utcnow()
+ if resource == 'fixed_ips':
+ user_id = None
+
+ quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
+ in_use, reserved,
+ until_refresh,
+ created_at, updated_at)
+
+ self.usages[resource] = quota_usage_ref
+
+ def compare_usage(self, usage_dict, expected):
+ for usage in expected:
+ resource = usage['resource']
+ for key, value in usage.items():
+ actual = getattr(usage_dict[resource], key)
+ self.assertEqual(actual, value,
+ "%s != %s on usage for resource %s" %
+ (actual, value, resource))
+
+ def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
+ delta, expire, created_at, updated_at):
+ reservation_ref = sqa_models.Reservation()
+ reservation_ref.id = len(self.reservations_created)
+ reservation_ref.uuid = uuid
+ reservation_ref.usage_id = usage_id
+ reservation_ref.project_id = project_id
+ reservation_ref.user_id = user_id
+ reservation_ref.resource = resource
+ reservation_ref.delta = delta
+ reservation_ref.expire = expire
+ reservation_ref.created_at = created_at
+ reservation_ref.updated_at = updated_at
+ reservation_ref.deleted_at = None
+ reservation_ref.deleted = False
+
+ return reservation_ref
+
+ def compare_reservation(self, reservations, expected):
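+ # Every expected reservation must have been created with matching
+ # fields, and no unexpected reservation uuids may be left over.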
+ reservations = set(reservations)
+ for resv in expected:
+ resource = resv['resource']
+ resv_obj = self.reservations_created[resource]
+
+ self.assertIn(resv_obj.uuid, reservations)
+ reservations.discard(resv_obj.uuid)
+
+ for key, value in resv.items():
+ actual = getattr(resv_obj, key)
+ self.assertEqual(actual, value,
+ "%s != %s on reservation for resource %s" %
+ (actual, value, resource))
+
+ self.assertEqual(len(reservations), 0)
+
+ def _update_reservations_list(self, usage_id_change=False,
+ delta_change=False):
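+ # Build the expected reservation rows. usage_id_change points them at
+ # the newly created usage rows instead of the pre-seeded ones;
+ # delta_change flips the deltas negative for the unders/reduction
+ # tests.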
+ reservations_list = [
+ dict(resource='instances',
+ project_id='test_project',
+ delta=2),
+ dict(resource='cores',
+ project_id='test_project',
+ delta=4),
+ dict(resource='ram',
+ delta=2 * 1024),
+ dict(resource='fixed_ips',
+ project_id='test_project',
+ delta=2),
+ ]
+ if usage_id_change:
+ reservations_list[0]["usage_id"] = self.usages_created['instances']
+ reservations_list[1]["usage_id"] = self.usages_created['cores']
+ reservations_list[2]["usage_id"] = self.usages_created['ram']
+ reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
+ else:
+ reservations_list[0]["usage_id"] = self.usages['instances']
+ reservations_list[1]["usage_id"] = self.usages['cores']
+ reservations_list[2]["usage_id"] = self.usages['ram']
+ reservations_list[3]["usage_id"] = self.usages['fixed_ips']
+ if delta_change:
+ reservations_list[0]["delta"] = -2
+ reservations_list[1]["delta"] = -4
+ reservations_list[2]["delta"] = -2 * 1024
+ reservations_list[3]["delta"] = -2
+ return reservations_list
+
+ def _init_usages(self, *in_use, **kwargs):
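+ # Seed usage rows for instances, cores, ram and fixed_ips (in that
+ # positional order) and return a context for test_project.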
+ for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')):
+ self.init_usage('test_project', 'fake_user',
+ option, in_use[i], **kwargs)
+ return FakeContext('test_project', 'test_class')
+
+ def test_quota_reserve_create_usages(self):
+ context = FakeContext('test_project', 'test_class')
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["in_use"] = 0
+ self.usages_list[1]["in_use"] = 0
+ self.usages_list[2]["in_use"] = 0
+ self.usages_list[3]["in_use"] = 0
+ self.compare_usage(self.usages_created, self.usages_list)
+ reservations_list = self._update_reservations_list(True)
+ self.compare_reservation(result, reservations_list)
+
+ def test_quota_reserve_negative_in_use(self):
+ context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 5, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["until_refresh"] = 5
+ self.usages_list[1]["until_refresh"] = 5
+ self.usages_list[2]["until_refresh"] = 5
+ self.usages_list[3]["until_refresh"] = 5
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_until_refresh(self):
+ context = self._init_usages(3, 3, 3, 3, until_refresh=1)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 5, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["until_refresh"] = 5
+ self.usages_list[1]["until_refresh"] = 5
+ self.usages_list[2]["until_refresh"] = 5
+ self.usages_list[3]["until_refresh"] = 5
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_max_age(self):
+ max_age = 3600
+ record_created = (timeutils.utcnow() -
+ datetime.timedelta(seconds=max_age))
+ context = self._init_usages(3, 3, 3, 3, created_at=record_created,
+ updated_at=record_created)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, max_age)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_no_refresh(self):
+ context = self._init_usages(3, 3, 3, 3)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 3
+ self.usages_list[1]["in_use"] = 3
+ self.usages_list[2]["in_use"] = 3
+ self.usages_list[3]["in_use"] = 3
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_unders(self):
+ context = self._init_usages(1, 3, 1 * 1024, 1)
+ self.deltas["instances"] = -2
+ self.deltas["cores"] = -4
+ self.deltas["ram"] = -2 * 1024
+ self.deltas["fixed_ips"] = -2
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 3
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 1 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ reservations_list = self._update_reservations_list(False, True)
+ self.compare_reservation(result, reservations_list)
+
+ def test_quota_reserve_overs(self):
+ context = self._init_usages(4, 8, 10 * 1024, 4)
+ try:
+ sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire, 0, 0)
+ except exception.OverQuota as e:
+ expected_kwargs = {'code': 500,
+ 'usages': {'instances': {'reserved': 0, 'in_use': 4},
+ 'ram': {'reserved': 0, 'in_use': 10240},
+ 'fixed_ips': {'reserved': 0, 'in_use': 4},
+ 'cores': {'reserved': 0, 'in_use': 8}},
+ 'headroom': {'cores': 2, 'ram': 0, 'fixed_ips': 1,
+ 'instances': 1},
+ 'overs': ['cores', 'fixed_ips', 'instances', 'ram'],
+ 'quotas': {'cores': 10, 'ram': 10240,
+ 'fixed_ips': 5, 'instances': 5}}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected OverQuota failure')
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 4
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 8
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 10 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 4
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_cores_unlimited(self):
+ # Requesting 8 cores, quota_cores set to unlimited:
+ self.flags(quota_cores=-1)
+ self._init_usages(1, 8, 1 * 1024, 1)
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 8
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 1 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_ram_unlimited(self):
+ # Requesting 10*1024 ram, quota_ram set to unlimited:
+ self.flags(quota_ram=-1)
+ self._init_usages(1, 1, 10 * 1024, 1)
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 1
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 10 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_reduction(self):
+ context = self._init_usages(10, 20, 20 * 1024, 10)
+ self.deltas["instances"] = -2
+ self.deltas["cores"] = -4
+ self.deltas["ram"] = -2 * 1024
+ self.deltas["fixed_ips"] = -2
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 10
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 20
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 20 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 10
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ reservations_list = self._update_reservations_list(False, True)
+ self.compare_reservation(result, reservations_list)
+
+
+class NoopQuotaDriverTestCase(test.TestCase):
+ def setUp(self):
+ super(NoopQuotaDriverTestCase, self).setUp()
+
+ self.flags(quota_instances=10,
+ quota_cores=20,
+ quota_ram=50 * 1024,
+ quota_floating_ips=10,
+ quota_metadata_items=128,
+ quota_injected_files=5,
+ quota_injected_file_content_bytes=10 * 1024,
+ quota_injected_file_path_length=255,
+ quota_security_groups=10,
+ quota_security_group_rules=20,
+ reservation_expire=86400,
+ until_refresh=0,
+ max_age=0,
+ )
+
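+ # The noop driver reports every limit as -1 (unlimited), usages as -1
+ # (unknown), and settable quotas as 0..-1 (unbounded).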
+ self.expected_with_usages = {}
+ self.expected_without_usages = {}
+ self.expected_without_dict = {}
+ self.expected_settable_quotas = {}
+ for r in quota.QUOTAS._resources:
+ self.expected_with_usages[r] = dict(limit=-1,
+ in_use=-1,
+ reserved=-1)
+ self.expected_without_usages[r] = dict(limit=-1)
+ self.expected_without_dict[r] = -1
+ self.expected_settable_quotas[r] = dict(minimum=0, maximum=-1)
+
+ self.driver = quota.NoopQuotaDriver()
+
+ def test_get_defaults(self):
+ # Use our pre-defined resources
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+ self.assertEqual(self.expected_without_dict, result)
+
+ def test_get_class_quotas(self):
+ result = self.driver.get_class_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_class')
+ self.assertEqual(self.expected_without_dict, result)
+
+ def test_get_class_quotas_no_defaults(self):
+ result = self.driver.get_class_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_class',
+ False)
+ self.assertEqual(self.expected_without_dict, result)
+
+ def test_get_project_quotas(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project')
+ self.assertEqual(self.expected_with_usages, result)
+
+ def test_get_user_quotas(self):
+ result = self.driver.get_user_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ 'fake_user')
+ self.assertEqual(self.expected_with_usages, result)
+
+ def test_get_project_quotas_no_defaults(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ defaults=False)
+ self.assertEqual(self.expected_with_usages, result)
+
+ def test_get_user_quotas_no_defaults(self):
+ result = self.driver.get_user_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ 'fake_user',
+ defaults=False)
+ self.assertEqual(self.expected_with_usages, result)
+
+ def test_get_project_quotas_no_usages(self):
+ result = self.driver.get_project_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+
+ def test_get_user_quotas_no_usages(self):
+ result = self.driver.get_user_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ 'fake_user',
+ usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+
+ def test_get_settable_quotas_with_user(self):
+ result = self.driver.get_settable_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project',
+ 'fake_user')
+ self.assertEqual(self.expected_settable_quotas, result)
+
+ def test_get_settable_quotas_without_user(self):
+ result = self.driver.get_settable_quotas(None,
+ quota.QUOTAS._resources,
+ 'test_project')
+ self.assertEqual(self.expected_settable_quotas, result)
diff --git a/nova/tests/unit/test_safeutils.py b/nova/tests/unit/test_safeutils.py
new file mode 100644
index 0000000000..66d20ca79e
--- /dev/null
+++ b/nova/tests/unit/test_safeutils.py
@@ -0,0 +1,98 @@
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import safe_utils
+from nova import test
+
+
+class GetCallArgsTestCase(test.NoDBTestCase):
+ def _test_func(self, instance, red=None, blue=None):
+ pass
+
+ def test_all_kwargs(self):
+ args = ()
+ kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
+ # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_all_args(self):
+ args = ({'uuid': 1}, 3, 4)
+ kwargs = {}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
+ # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_mixed_args(self):
+ args = ({'uuid': 1}, 3)
+ kwargs = {'blue': 4}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
+ # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_partial_kwargs(self):
+ args = ()
+ kwargs = {'instance': {'uuid': 1}, 'red': 3}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
+ # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertIsNone(callargs['blue'])
+
+ def test_partial_args(self):
+ args = ({'uuid': 1}, 3)
+ kwargs = {}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
+ # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertIsNone(callargs['blue'])
+
+ def test_partial_mixed_args(self):
+ args = (3,)
+ kwargs = {'instance': {'uuid': 1}}
+ callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
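+ # implicit self counts as an arg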
+ self.assertEqual(4, len(callargs))
+ self.assertIn('instance', callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertIn('red', callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertIn('blue', callargs)
+ self.assertIsNone(callargs['blue'])
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
new file mode 100644
index 0000000000..bb36143869
--- /dev/null
+++ b/nova/tests/unit/test_service.py
@@ -0,0 +1,370 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for remote procedure calls using queue
+"""
+
+import sys
+
+import mock
+import mox
+from oslo.concurrency import processutils
+from oslo.config import cfg
+import testtools
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import manager
+from nova.openstack.common import service as _service
+from nova import rpc
+from nova import service
+from nova import test
+from nova.tests.unit import utils
+from nova import wsgi
+
+test_service_opts = [
+ cfg.StrOpt("fake_manager",
+ default="nova.tests.unit.test_service.FakeManager",
+ help="Manager for testing"),
+ cfg.StrOpt("test_service_listen",
+ default='127.0.0.1',
+ help="Host to bind test service to"),
+ cfg.IntOpt("test_service_listen_port",
+ default=0,
+ help="Port number to bind test service to"),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(test_service_opts)
+
+
+class FakeManager(manager.Manager):
+ """Fake manager for tests."""
+ def test_method(self):
+ return 'manager'
+
+
+class ExtendedService(service.Service):
+ def test_method(self):
+ return 'service'
+
+
+class ServiceManagerTestCase(test.TestCase):
+ """Test cases for Services."""
+
+ def test_message_gets_to_manager(self):
+ serv = service.Service('test',
+ 'test',
+ 'test',
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
+ self.assertEqual(serv.test_method(), 'manager')
+
+ def test_override_manager_method(self):
+ serv = ExtendedService('test',
+ 'test',
+ 'test',
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
+ self.assertEqual(serv.test_method(), 'service')
+
+ def test_service_with_min_down_time(self):
+ CONF.set_override('service_down_time', 10)
+ CONF.set_override('report_interval', 10)
+ serv = service.Service('test',
+ 'test',
+ 'test',
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
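+ # service_down_time (10) does not exceed report_interval (10), so the
+ # service bumps it to 2.5 * report_interval = 25.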
+ self.assertEqual(CONF.service_down_time, 25)
+
+
+class ServiceFlagsTestCase(test.TestCase):
+ def test_service_enabled_on_create_based_on_flag(self):
+ self.flags(enable_new_services=True)
+ host = 'foo'
+ binary = 'nova-fake'
+ app = service.Service.create(host=host, binary=binary)
+ app.start()
+ app.stop()
+ ref = db.service_get(context.get_admin_context(), app.service_id)
+ db.service_destroy(context.get_admin_context(), app.service_id)
+ self.assertFalse(ref['disabled'])
+
+ def test_service_disabled_on_create_based_on_flag(self):
+ self.flags(enable_new_services=False)
+ host = 'foo'
+ binary = 'nova-fake'
+ app = service.Service.create(host=host, binary=binary)
+ app.start()
+ app.stop()
+ ref = db.service_get(context.get_admin_context(), app.service_id)
+ db.service_destroy(context.get_admin_context(), app.service_id)
+ self.assertTrue(ref['disabled'])
+
+
+class ServiceTestCase(test.TestCase):
+ """Test cases for Services."""
+
+ def setUp(self):
+ super(ServiceTestCase, self).setUp()
+ self.host = 'foo'
+ self.binary = 'nova-fake'
+ self.topic = 'fake'
+ self.mox.StubOutWithMock(db, 'service_create')
+ self.mox.StubOutWithMock(db, 'service_get_by_args')
+ self.flags(use_local=True, group='conductor')
+
+ def test_create(self):
+
+ # NOTE(vish): Create was moved out of mox replay to make sure that
+ # the looping calls are created in StartService.
+ app = service.Service.create(host=self.host, binary=self.binary,
+ topic=self.topic)
+
+ self.assertTrue(app)
+
+ def _service_start_mocks(self):
+ service_create = {'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0}
+ service_ref = {'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0,
+ 'id': 1}
+
+ db.service_get_by_args(mox.IgnoreArg(),
+ self.host, self.binary).AndRaise(exception.NotFound())
+ db.service_create(mox.IgnoreArg(),
+ service_create).AndReturn(service_ref)
+ return service_ref
+
+ def test_init_and_start_hooks(self):
+ self.manager_mock = self.mox.CreateMock(FakeManager)
+ self.mox.StubOutWithMock(sys.modules[__name__],
+ 'FakeManager', use_mock_anything=True)
+ self.mox.StubOutWithMock(self.manager_mock, 'init_host')
+ self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
+ self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
+
+ FakeManager(host=self.host).AndReturn(self.manager_mock)
+
+ self.manager_mock.service_name = self.topic
+ self.manager_mock.additional_endpoints = []
+
+ # init_host is called before any service record is created
+ self.manager_mock.init_host()
+ self._service_start_mocks()
+ # pre_start_hook is called after service record is created,
+ # but before RPC consumer is created
+ self.manager_mock.pre_start_hook()
+ # post_start_hook is called after RPC consumer is created.
+ self.manager_mock.post_start_hook()
+
+ self.mox.ReplayAll()
+
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
+
+ def _test_service_check_create_race(self, ex):
+ self.manager_mock = self.mox.CreateMock(FakeManager)
+ self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(self.manager_mock, 'init_host')
+ self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
+ self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
+
+ FakeManager(host=self.host).AndReturn(self.manager_mock)
+
+ # init_host is called before any service record is created
+ self.manager_mock.init_host()
+
+ db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
+ ).AndRaise(exception.NotFound)
+ db.service_create(mox.IgnoreArg(), mox.IgnoreArg()
+ ).AndRaise(ex)
+
+ class TestException(Exception):
+ pass
+
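+ # The create race is handled by re-reading the service record; raising
+ # TestException from that second lookup proves the retry path is taken.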
+ db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
+ ).AndRaise(TestException)
+
+ self.mox.ReplayAll()
+
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager')
+ self.assertRaises(TestException, serv.start)
+
+ def test_service_check_create_race_topic_exists(self):
+ ex = exception.ServiceTopicExists(host='foo', topic='bar')
+ self._test_service_check_create_race(ex)
+
+ def test_service_check_create_race_binary_exists(self):
+ ex = exception.ServiceBinaryExists(host='foo', binary='bar')
+ self._test_service_check_create_race(ex)
+
+ def test_parent_graceful_shutdown(self):
+ self.manager_mock = self.mox.CreateMock(FakeManager)
+ self.mox.StubOutWithMock(sys.modules[__name__],
+ 'FakeManager', use_mock_anything=True)
+ self.mox.StubOutWithMock(self.manager_mock, 'init_host')
+ self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
+ self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
+
+ self.mox.StubOutWithMock(_service.Service, 'stop')
+
+ FakeManager(host=self.host).AndReturn(self.manager_mock)
+
+ self.manager_mock.service_name = self.topic
+ self.manager_mock.additional_endpoints = []
+
+ # init_host is called before any service record is created
+ self.manager_mock.init_host()
+ self._service_start_mocks()
+ # pre_start_hook is called after service record is created,
+ # but before RPC consumer is created
+ self.manager_mock.pre_start_hook()
+ # post_start_hook is called after RPC consumer is created.
+ self.manager_mock.post_start_hook()
+
+ _service.Service.stop()
+
+ self.mox.ReplayAll()
+
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
+
+ serv.stop()
+
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
+ def test_parent_graceful_shutdown_with_cleanup_host(self,
+ mock_svc_get_by_args,
+ mock_API):
+ mock_svc_get_by_args.return_value = {'id': 'some_value'}
+ mock_manager = mock.Mock()
+
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager')
+
+ serv.manager = mock_manager
+ serv.manager.additional_endpoints = []
+
+ serv.start()
+ serv.manager.init_host.assert_called_with()
+
+ serv.stop()
+ serv.manager.cleanup_host.assert_called_with()
+
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
+ @mock.patch.object(rpc, 'get_server')
+ def test_service_stop_waits_for_rpcserver(
+ self, mock_rpc, mock_svc_get_by_args, mock_API):
+ mock_svc_get_by_args.return_value = {'id': 'some_value'}
+ serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager')
+ serv.start()
+ serv.stop()
+ serv.rpcserver.start.assert_called_once_with()
+ serv.rpcserver.stop.assert_called_once_with()
+ serv.rpcserver.wait.assert_called_once_with()
+
+
+class TestWSGIService(test.TestCase):
+
+ def setUp(self):
+ super(TestWSGIService, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+
+ def test_service_random_port(self):
+ test_service = service.WSGIService("test_service")
+ test_service.start()
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
+ def test_workers_set_default(self):
+ test_service = service.WSGIService("osapi_compute")
+ self.assertEqual(test_service.workers, processutils.get_worker_count())
+
+ def test_workers_set_good_user_setting(self):
+ CONF.set_override('osapi_compute_workers', 8)
+ test_service = service.WSGIService("osapi_compute")
+ self.assertEqual(test_service.workers, 8)
+
+ def test_workers_set_zero_user_setting(self):
+ CONF.set_override('osapi_compute_workers', 0)
+ test_service = service.WSGIService("osapi_compute")
+ # If a value less than 1 is used, it defaults to the number of available processors
+ self.assertEqual(test_service.workers, processutils.get_worker_count())
+
+ def test_service_start_with_illegal_workers(self):
+ CONF.set_override("osapi_compute_workers", -1)
+ self.assertRaises(exception.InvalidInput,
+ service.WSGIService, "osapi_compute")
+
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
+ def test_service_random_port_with_ipv6(self):
+ CONF.set_default("test_service_listen", "::1")
+ test_service = service.WSGIService("test_service")
+ test_service.start()
+ self.assertEqual("::1", test_service.host)
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
+ def test_reset_pool_size_to_default(self):
+ test_service = service.WSGIService("test_service")
+ test_service.start()
+
+ # Stopping the service, which in turn sets pool size to 0
+ test_service.stop()
+ self.assertEqual(test_service.server._pool.size, 0)
+
+ # Resetting pool size to default
+ test_service.reset()
+ test_service.start()
+ self.assertEqual(test_service.server._pool.size,
+ CONF.wsgi_default_pool_size)
+
+
+class TestLauncher(test.TestCase):
+
+ def setUp(self):
+ super(TestLauncher, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+ self.service = service.WSGIService("test_service")
+
+ def test_launch_app(self):
+ service.serve(self.service)
+ self.assertNotEqual(0, self.service.port)
+ service._launcher.stop()
diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py
new file mode 100644
index 0000000000..a0ee2ab809
--- /dev/null
+++ b/nova/tests/unit/test_test.py
@@ -0,0 +1,60 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the testing base code."""
+
+from oslo.config import cfg
+from oslo import messaging
+
+from nova.openstack.common import log as logging
+from nova import rpc
+from nova import test
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
+
+
+class IsolationTestCase(test.TestCase):
+ """Ensure that things are cleaned up after failed tests.
+
+ These tests don't do much on their own, but if test isolation fails, a
+ bunch of other tests should fail.
+
+ """
+ def test_service_isolation(self):
+ self.flags(use_local=True, group='conductor')
+ self.useFixture(test.ServiceFixture('compute'))
+
+ def test_rpc_consumer_isolation(self):
+ class NeverCalled(object):
+
+ def __getattribute__(*args):
+ assert False, "I should never get called."
+
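+ # Any message leaking in from a consumer left over by another test would
+ # hit NeverCalled and trip the assertion above.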
+ server = rpc.get_server(messaging.Target(topic='compute',
+ server=CONF.host),
+ endpoints=[NeverCalled()])
+ server.start()
+
+
+class BadLogTestCase(test.TestCase):
+ """Make sure a mis-formatted debug log will get caught."""
+
+ def test_bad_debug_log(self):
+ self.assertRaises(KeyError,
+ LOG.debug, "this is a misformated %(log)s", {'nothing': 'nothing'})
diff --git a/nova/tests/unit/test_test_utils.py b/nova/tests/unit/test_test_utils.py
new file mode 100644
index 0000000000..8cc87fba65
--- /dev/null
+++ b/nova/tests/unit/test_test_utils.py
@@ -0,0 +1,70 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+import tempfile
+
+import fixtures
+
+from nova import db
+from nova import test
+from nova.tests.unit import utils as test_utils
+
+
+class TestUtilsTestCase(test.TestCase):
+ def test_get_test_admin_context(self):
+ # get_test_admin_context's return value behaves like admin context.
+ ctxt = test_utils.get_test_admin_context()
+
+ # TODO(soren): This should verify the full interface context
+ # objects expose.
+ self.assertTrue(ctxt.is_admin)
+
+ def test_get_test_instance(self):
+ # get_test_instance's return value looks like an instance_ref.
+ instance_ref = test_utils.get_test_instance()
+ ctxt = test_utils.get_test_admin_context()
+ db.instance_get(ctxt, instance_ref['id'])
+
+ def _test_get_test_network_info(self):
+ """Does the return value match a real network_info structure."""
+ # The challenge here is to define what exactly such a structure
+ # must look like.
+ pass
+
+ def test_ipv6_supported(self):
+ self.assertIn(test_utils.is_ipv6_supported(), (False, True))
+
+ def fake_open(path):
+ raise IOError
+
+ def fake_socket_fail(x, y):
+ e = socket.error()
+ e.errno = errno.EAFNOSUPPORT
+ raise e
+
+ def fake_socket_ok(x, y):
+ return tempfile.TemporaryFile()
+
+ with fixtures.MonkeyPatch('socket.socket', fake_socket_fail):
+ self.assertFalse(test_utils.is_ipv6_supported())
+
+ with fixtures.MonkeyPatch('socket.socket', fake_socket_ok):
+ with fixtures.MonkeyPatch('sys.platform', 'windows'):
+ self.assertTrue(test_utils.is_ipv6_supported())
+
+ with fixtures.MonkeyPatch('sys.platform', 'linux2'):
+ with fixtures.MonkeyPatch('__builtin__.open', fake_open):
+ self.assertFalse(test_utils.is_ipv6_supported())
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
new file mode 100644
index 0000000000..8c26a38998
--- /dev/null
+++ b/nova/tests/unit/test_utils.py
@@ -0,0 +1,981 @@
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import datetime
+import functools
+import hashlib
+import importlib
+import os
+import os.path
+import StringIO
+import tempfile
+
+import mox
+import netaddr
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+import nova
+from nova import exception
+from nova import test
+from nova import utils
+
+CONF = cfg.CONF
+
+
+class GetMyIP4AddressTestCase(test.NoDBTestCase):
+ def test_get_my_ipv4_address_with_no_ipv4(self):
+ response = """172.16.0.0/16 via 172.16.251.13 dev tun1
+172.16.251.1 via 172.16.251.13 dev tun1
+172.16.251.13 dev tun1 proto kernel scope link src 172.16.251.14
+172.24.0.0/16 via 172.16.251.13 dev tun1
+192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1"""
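+ # There is no 'default via' entry in the route table, so the lookup falls
+ # back to 127.0.0.1.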
+
+ def fake_execute(*args, **kwargs):
+ return response, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '127.0.0.1')
+
+ def test_get_my_ipv4_address_bad_process(self):
+ def fake_execute(*args, **kwargs):
+ raise processutils.ProcessExecutionError()
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '127.0.0.1')
+
+ def test_get_my_ipv4_address_with_single_interface(self):
+ response_route = """default via 192.168.1.1 dev wlan0 proto static
+192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.137 metric 9
+"""
+ response_addr = """
+1: lo inet 127.0.0.1/8 scope host lo
+3: wlan0 inet 192.168.1.137/24 brd 192.168.1.255 scope global wlan0
+"""
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '192.168.1.137')
+
+ def test_get_my_ipv4_address_with_multi_ipv4_on_single_interface(self):
+ response_route = """
+172.18.56.0/24 dev customer proto kernel scope link src 172.18.56.22
+169.254.0.0/16 dev customer scope link metric 1031
+default via 172.18.56.1 dev customer
+"""
+ response_addr = (""
+"31: customer inet 172.18.56.22/24 brd 172.18.56.255 scope global"
+" customer\n"
+"31: customer inet 172.18.56.32/24 brd 172.18.56.255 scope global "
+"secondary customer")
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '172.18.56.22')
+
+ def test_get_my_ipv4_address_with_multiple_interfaces(self):
+ response_route = """
+169.1.9.0/24 dev eth1 proto kernel scope link src 169.1.9.10
+172.17.248.0/21 dev eth0 proto kernel scope link src 172.17.255.9
+169.254.0.0/16 dev eth0 scope link metric 1002
+169.254.0.0/16 dev eth1 scope link metric 1003
+default via 172.17.248.1 dev eth0 proto static
+"""
+ response_addr = """
+1: lo inet 127.0.0.1/8 scope host lo
+2: eth0 inet 172.17.255.9/21 brd 172.17.255.255 scope global eth0
+3: eth1 inet 169.1.9.10/24 scope global eth1
+"""
+
+ def fake_execute(*args, **kwargs):
+ if 'route' in args:
+ return response_route, None
+ return response_addr, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ address = utils.get_my_ipv4_address()
+ self.assertEqual(address, '172.17.255.9')
+
+
+class GenericUtilsTestCase(test.NoDBTestCase):
+ def test_parse_server_string(self):
+ result = utils.parse_server_string('::1')
+ self.assertEqual(('::1', ''), result)
+ result = utils.parse_server_string('[::1]:8773')
+ self.assertEqual(('::1', '8773'), result)
+ result = utils.parse_server_string('2001:db8::192.168.1.1')
+ self.assertEqual(('2001:db8::192.168.1.1', ''), result)
+ result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
+ self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
+ result = utils.parse_server_string('192.168.1.1')
+ self.assertEqual(('192.168.1.1', ''), result)
+ result = utils.parse_server_string('192.168.1.2:8773')
+ self.assertEqual(('192.168.1.2', '8773'), result)
+ result = utils.parse_server_string('192.168.1.3')
+ self.assertEqual(('192.168.1.3', ''), result)
+ result = utils.parse_server_string('www.example.com:8443')
+ self.assertEqual(('www.example.com', '8443'), result)
+ result = utils.parse_server_string('www.example.com')
+ self.assertEqual(('www.example.com', ''), result)
+ # error case
+ result = utils.parse_server_string('www.exa:mple.com:8443')
+ self.assertEqual(('', ''), result)
+
+ def test_hostname_unicode_sanitization(self):
+ hostname = u"\u7684.test.example.com"
+ self.assertEqual("test.example.com",
+ utils.sanitize_hostname(hostname))
+
+ def test_hostname_sanitize_periods(self):
+ hostname = "....test.example.com..."
+ self.assertEqual("test.example.com",
+ utils.sanitize_hostname(hostname))
+
+ def test_hostname_sanitize_dashes(self):
+ hostname = "----test.example.com---"
+ self.assertEqual("test.example.com",
+ utils.sanitize_hostname(hostname))
+
+ def test_hostname_sanitize_characters(self):
+ hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
+ self.assertEqual("91----test-host.example.com-0",
+ utils.sanitize_hostname(hostname))
+
+ def test_hostname_translate(self):
+ hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
+ self.assertEqual("hello", utils.sanitize_hostname(hostname))
+
+ def test_read_cached_file(self):
+ self.mox.StubOutWithMock(os.path, "getmtime")
+ os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
+ self.mox.ReplayAll()
+
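+ # getmtime matches the cached mtime, so the cached data is returned
+ # without re-reading the file.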
+ cache_data = {"data": 1123, "mtime": 1}
+ data = utils.read_cached_file("/this/is/a/fake", cache_data)
+ self.assertEqual(cache_data["data"], data)
+
+ def test_read_modified_cached_file(self):
+ self.mox.StubOutWithMock(os.path, "getmtime")
+ self.mox.StubOutWithMock(__builtin__, 'open')
+ os.path.getmtime(mox.IgnoreArg()).AndReturn(2)
+
+ fake_contents = "lorem ipsum"
+ fake_file = self.mox.CreateMockAnything()
+ fake_file.read().AndReturn(fake_contents)
+ fake_context_manager = self.mox.CreateMockAnything()
+ fake_context_manager.__enter__().AndReturn(fake_file)
+ fake_context_manager.__exit__(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)
+
+ self.mox.ReplayAll()
+ cache_data = {"data": 1123, "mtime": 1}
+ self.reload_called = False
+
+ def test_reload(reloaded_data):
+ self.assertEqual(reloaded_data, fake_contents)
+ self.reload_called = True
+
+ data = utils.read_cached_file("/this/is/a/fake", cache_data,
+ reload_func=test_reload)
+ self.assertEqual(data, fake_contents)
+ self.assertTrue(self.reload_called)
+
+ def test_generate_password(self):
+ password = utils.generate_password()
+ self.assertTrue([c for c in password if c in '0123456789'])
+ self.assertTrue([c for c in password
+ if c in 'abcdefghijklmnopqrstuvwxyz'])
+ self.assertTrue([c for c in password
+ if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
+
+ def test_read_file_as_root(self):
+ def fake_execute(*args, **kwargs):
+ if args[1] == 'bad':
+ raise processutils.ProcessExecutionError()
+ return 'fakecontents', None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ contents = utils.read_file_as_root('good')
+ self.assertEqual(contents, 'fakecontents')
+ self.assertRaises(exception.FileNotFound,
+ utils.read_file_as_root, 'bad')
+
+ def test_temporary_chown(self):
+ def fake_execute(*args, **kwargs):
+ if args[0] == 'chown':
+ fake_execute.uid = args[1]
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ with tempfile.NamedTemporaryFile() as f:
+ with utils.temporary_chown(f.name, owner_uid=2):
+ self.assertEqual(fake_execute.uid, 2)
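+ # once the context manager exits, ownership is chown'd back to the
+ # caller's uid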
+ self.assertEqual(fake_execute.uid, os.getuid())
+
+ def test_xhtml_escape(self):
+ self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
+ self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
+ self.assertEqual('&amp;', utils.xhtml_escape('&'))
+ self.assertEqual('&gt;', utils.xhtml_escape('>'))
+ self.assertEqual('&lt;', utils.xhtml_escape('<'))
+ self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>'))
+
+ def test_is_valid_ipv4(self):
+ self.assertTrue(utils.is_valid_ipv4('127.0.0.1'))
+ self.assertFalse(utils.is_valid_ipv4('::1'))
+ self.assertFalse(utils.is_valid_ipv4('bacon'))
+ self.assertFalse(utils.is_valid_ipv4(""))
+ self.assertFalse(utils.is_valid_ipv4(10))
+
+ def test_is_valid_ipv6(self):
+ self.assertTrue(utils.is_valid_ipv6("::1"))
+ self.assertTrue(utils.is_valid_ipv6(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
+ self.assertTrue(utils.is_valid_ipv6(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertFalse(utils.is_valid_ipv6("foo"))
+ self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
+ self.assertFalse(utils.is_valid_ipv6(""))
+ self.assertFalse(utils.is_valid_ipv6(10))
+
+ def test_is_valid_ipv6_cidr(self):
+ self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "0000:0000:0000:0000:0000:0000:0000:0001/32"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
+ self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))
+
+ def test_get_shortened_ipv6(self):
+ self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
+ utils.get_shortened_ipv6(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
+ self.assertEqual("::1", utils.get_shortened_ipv6(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertEqual("caca::caca:0:babe:201:102",
+ utils.get_shortened_ipv6(
+ "caca:0000:0000:caca:0000:babe:0201:0102"))
+ self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
+ "127.0.0.1")
+ self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
+ "failure")
+
+ def test_get_shortened_ipv6_cidr(self):
+ self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
+ "2600:0000:0000:0000:0000:0000:0000:0000/64"))
+ self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
+ "2600::1/64"))
+ self.assertRaises(netaddr.AddrFormatError,
+ utils.get_shortened_ipv6_cidr,
+ "127.0.0.1")
+ self.assertRaises(netaddr.AddrFormatError,
+ utils.get_shortened_ipv6_cidr,
+ "failure")
+
+ def test_get_hash_str(self):
+ base_str = "foo"
+ value = hashlib.md5(base_str).hexdigest()
+ self.assertEqual(
+ value, utils.get_hash_str(base_str))
+
+
+class MonkeyPatchTestCase(test.NoDBTestCase):
+ """Unit test for utils.monkey_patch()."""
+ def setUp(self):
+ super(MonkeyPatchTestCase, self).setUp()
+ self.example_package = 'nova.tests.unit.monkey_patch_example.'
+ self.flags(
+ monkey_patch=True,
+ monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ + self.example_package + 'example_decorator'])
+
+ def test_monkey_patch(self):
+ utils.monkey_patch()
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
+ from nova.tests.unit.monkey_patch_example import example_a
+ from nova.tests.unit.monkey_patch_example import example_b
+
+ self.assertEqual('Example function', example_a.example_function_a())
+ exampleA = example_a.ExampleClassA()
+ exampleA.example_method()
+ ret_a = exampleA.example_method_add(3, 5)
+ self.assertEqual(ret_a, 8)
+
+ self.assertEqual('Example function', example_b.example_function_b())
+ exampleB = example_b.ExampleClassB()
+ exampleB.example_method()
+ ret_b = exampleB.example_method_add(3, 5)
+
+ self.assertEqual(ret_b, 8)
+ package_a = self.example_package + 'example_a.'
+ self.assertIn(package_a + 'example_function_a',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+
+ self.assertIn(package_a + 'ExampleClassA.example_method',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+ self.assertIn(package_a + 'ExampleClassA.example_method_add',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+ package_b = self.example_package + 'example_b.'
+ self.assertNotIn(package_b + 'example_function_b',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+ self.assertNotIn(package_b + 'ExampleClassB.example_method',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+ self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
+ nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+
+
+class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
+ """Unit test for default monkey_patch_modules value."""
+
+ def setUp(self):
+ super(MonkeyPatchDefaultTestCase, self).setUp()
+ self.flags(
+ monkey_patch=True)
+
+ def test_monkey_patch_default_mod(self):
+ # monkey_patch_modules is defined to be
+ # <module_to_patch>:<decorator_to_patch_with>
+ # Here we check that both parts of the default values are
+ # valid
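+ # (MonkeyPatchTestCase above exercises the same form with
+ # example_a:example_decorator)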
+ for module in CONF.monkey_patch_modules:
+ m = module.split(':', 1)
+ # Check we can import the module to be patched
+ importlib.import_module(m[0])
+ # check the decorator is valid
+ decorator_name = m[1].rsplit('.', 1)
+ decorator_module = importlib.import_module(decorator_name[0])
+ getattr(decorator_module, decorator_name[1])
+
+
+class AuditPeriodTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(AuditPeriodTest, self).setUp()
+ # a fairly random time to test with
+ self.test_time = datetime.datetime(second=23,
+ minute=12,
+ hour=8,
+ day=5,
+ month=3,
+ year=2012)
+ timeutils.set_time_override(override_time=self.test_time)
+
+ def tearDown(self):
+ timeutils.clear_time_override()
+ super(AuditPeriodTest, self).tearDown()
+
+ def test_hour(self):
+ begin, end = utils.last_completed_audit_period(unit='hour')
+ self.assertEqual(begin, datetime.datetime(
+ hour=7,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ hour=8,
+ day=5,
+ month=3,
+ year=2012))
+
+ def test_hour_with_offset_before_current(self):
+ begin, end = utils.last_completed_audit_period(unit='hour@10')
+ self.assertEqual(begin, datetime.datetime(
+ minute=10,
+ hour=7,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ minute=10,
+ hour=8,
+ day=5,
+ month=3,
+ year=2012))
+
+ def test_hour_with_offset_after_current(self):
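+ # The test time is 2012-03-05 08:12; the @30 boundary of the current hour
+ # has not been reached yet, so the last completed period is 06:30 - 07:30.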
+ begin, end = utils.last_completed_audit_period(unit='hour@30')
+ self.assertEqual(begin, datetime.datetime(
+ minute=30,
+ hour=6,
+ day=5,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ minute=30,
+ hour=7,
+ day=5,
+ month=3,
+ year=2012))
+
+ def test_day(self):
+ begin, end = utils.last_completed_audit_period(unit='day')
+ self.assertEqual(begin, datetime.datetime(
+ day=4,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ day=5,
+ month=3,
+ year=2012))
+
+ def test_day_with_offset_before_current(self):
+ begin, end = utils.last_completed_audit_period(unit='day@6')
+ self.assertEqual(begin, datetime.datetime(
+ hour=6,
+ day=4,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ hour=6,
+ day=5,
+ month=3,
+ year=2012))
+
+ def test_day_with_offset_after_current(self):
+ begin, end = utils.last_completed_audit_period(unit='day@10')
+ self.assertEqual(begin, datetime.datetime(
+ hour=10,
+ day=3,
+ month=3,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ hour=10,
+ day=4,
+ month=3,
+ year=2012))
+
+ def test_month(self):
+ begin, end = utils.last_completed_audit_period(unit='month')
+ self.assertEqual(begin, datetime.datetime(
+ day=1,
+ month=2,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ day=1,
+ month=3,
+ year=2012))
+
+ def test_month_with_offset_before_current(self):
+ begin, end = utils.last_completed_audit_period(unit='month@2')
+ self.assertEqual(begin, datetime.datetime(
+ day=2,
+ month=2,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ day=2,
+ month=3,
+ year=2012))
+
+ def test_month_with_offset_after_current(self):
+ begin, end = utils.last_completed_audit_period(unit='month@15')
+ self.assertEqual(begin, datetime.datetime(
+ day=15,
+ month=1,
+ year=2012))
+ self.assertEqual(end, datetime.datetime(
+ day=15,
+ month=2,
+ year=2012))
+
+ def test_year(self):
+ begin, end = utils.last_completed_audit_period(unit='year')
+ self.assertEqual(begin, datetime.datetime(
+ day=1,
+ month=1,
+ year=2011))
+ self.assertEqual(end, datetime.datetime(
+ day=1,
+ month=1,
+ year=2012))
+
+ def test_year_with_offset_before_current(self):
+ begin, end = utils.last_completed_audit_period(unit='year@2')
+ self.assertEqual(begin, datetime.datetime(
+ day=1,
+ month=2,
+ year=2011))
+ self.assertEqual(end, datetime.datetime(
+ day=1,
+ month=2,
+ year=2012))
+
+ def test_year_with_offset_after_current(self):
+ begin, end = utils.last_completed_audit_period(unit='year@6')
+ self.assertEqual(begin, datetime.datetime(
+ day=1,
+ month=6,
+ year=2010))
+ self.assertEqual(end, datetime.datetime(
+ day=1,
+ month=6,
+ year=2011))
+
+
+class MkfsTestCase(test.NoDBTestCase):
+
+ def test_mkfs(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
+ run_as_root=False)
+ utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
+ run_as_root=False)
+ utils.execute('mkswap', '/my/swap/block/dev',
+ run_as_root=False)
+ self.mox.ReplayAll()
+
+ utils.mkfs('ext4', '/my/block/dev')
+ utils.mkfs('msdos', '/my/msdos/block/dev')
+ utils.mkfs('swap', '/my/swap/block/dev')
+
+ def test_mkfs_with_label(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F',
+ '-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
+ utils.execute('mkfs', '-t', 'msdos',
+ '-n', 'msdos-vol', '/my/msdos/block/dev',
+ run_as_root=False)
+ utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
+ run_as_root=False)
+ self.mox.ReplayAll()
+
+ utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
+ utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
+ utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
+
+
+class LastBytesTestCase(test.NoDBTestCase):
+ """Test the last_bytes() utility method."""
+
+ def setUp(self):
+ super(LastBytesTestCase, self).setUp()
+ self.f = StringIO.StringIO('1234567890')
+
+ def test_truncated(self):
+ self.f.seek(0, os.SEEK_SET)
+ out, remaining = utils.last_bytes(self.f, 5)
+ self.assertEqual(out, '67890')
+ self.assertTrue(remaining > 0)
+
+ def test_read_all(self):
+ self.f.seek(0, os.SEEK_SET)
+ out, remaining = utils.last_bytes(self.f, 1000)
+ self.assertEqual(out, '1234567890')
+ self.assertFalse(remaining > 0)
+
+ def test_seek_too_far_real_file(self):
+ # StringIO doesn't raise IOError if you seek past the start of the file.
+ flo = tempfile.TemporaryFile()
+ content = '1234567890'
+ flo.write(content)
+ self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
+
+
+class IntLikeTestCase(test.NoDBTestCase):
+
+ def test_is_int_like(self):
+ self.assertTrue(utils.is_int_like(1))
+ self.assertTrue(utils.is_int_like("1"))
+ self.assertTrue(utils.is_int_like("514"))
+ self.assertTrue(utils.is_int_like("0"))
+
+ self.assertFalse(utils.is_int_like(1.1))
+ self.assertFalse(utils.is_int_like("1.1"))
+ self.assertFalse(utils.is_int_like("1.1.1"))
+ self.assertFalse(utils.is_int_like(None))
+ self.assertFalse(utils.is_int_like("0."))
+ self.assertFalse(utils.is_int_like("aaaaaa"))
+ self.assertFalse(utils.is_int_like("...."))
+ self.assertFalse(utils.is_int_like("1g"))
+ self.assertFalse(
+ utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64"))
+ self.assertFalse(utils.is_int_like("a1"))
+
+
+class MetadataToDictTestCase(test.NoDBTestCase):
+ def test_metadata_to_dict(self):
+ self.assertEqual(utils.metadata_to_dict(
+ [{'key': 'foo1', 'value': 'bar'},
+ {'key': 'foo2', 'value': 'baz'}]),
+ {'foo1': 'bar', 'foo2': 'baz'})
+
+ def test_metadata_to_dict_empty(self):
+ self.assertEqual(utils.metadata_to_dict([]), {})
+
+ def test_dict_to_metadata(self):
+ expected = [{'key': 'foo1', 'value': 'bar1'},
+ {'key': 'foo2', 'value': 'bar2'}]
+ self.assertEqual(utils.dict_to_metadata(dict(foo1='bar1',
+ foo2='bar2')),
+ expected)
+
+ def test_dict_to_metadata_empty(self):
+ self.assertEqual(utils.dict_to_metadata({}), [])
+
+
+class WrappedCodeTestCase(test.NoDBTestCase):
+ """Test the get_wrapped_function utility method."""
+
+ def _wrapper(self, function):
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ function(self, *args, **kwargs)
+ return decorated_function
+
+ def test_single_wrapped(self):
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertIn('self', func_code.co_varnames)
+ self.assertIn('instance', func_code.co_varnames)
+ self.assertIn('red', func_code.co_varnames)
+ self.assertIn('blue', func_code.co_varnames)
+
+ def test_double_wrapped(self):
+ @self._wrapper
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertIn('self', func_code.co_varnames)
+ self.assertIn('instance', func_code.co_varnames)
+ self.assertIn('red', func_code.co_varnames)
+ self.assertIn('blue', func_code.co_varnames)
+
+ def test_triple_wrapped(self):
+ @self._wrapper
+ @self._wrapper
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertIn('self', func_code.co_varnames)
+ self.assertIn('instance', func_code.co_varnames)
+ self.assertIn('red', func_code.co_varnames)
+ self.assertIn('blue', func_code.co_varnames)
+
+
+class ExpectedArgsTestCase(test.NoDBTestCase):
+ def test_passes(self):
+ @utils.expects_func_args('foo', 'baz')
+ def dec(f):
+ return f
+
+ @dec
+ def func(foo, bar, baz="lol"):
+ pass
+
+ def test_raises(self):
+ @utils.expects_func_args('foo', 'baz')
+ def dec(f):
+ return f
+
+ def func(bar, baz):
+ pass
+
+ self.assertRaises(TypeError, dec, func)
+
+ def test_var_no_of_args(self):
+ @utils.expects_func_args('foo')
+ def dec(f):
+ return f
+
+ @dec
+ def func(bar, *args, **kwargs):
+ pass
+
+ def test_more_layers(self):
+ @utils.expects_func_args('foo', 'baz')
+ def dec(f):
+ return f
+
+ def dec_2(f):
+ def inner_f(*a, **k):
+ return f()
+ return inner_f
+
+ @dec_2
+ def func(bar, baz):
+ pass
+
+ self.assertRaises(TypeError, dec, func)
+
+
+class StringLengthTestCase(test.NoDBTestCase):
+ def test_check_string_length(self):
+ self.assertIsNone(utils.check_string_length(
+ 'test', 'name', max_length=255))
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 11, 'name', max_length=255)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ '', 'name', min_length=1)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 'a' * 256, 'name', max_length=255)
+
+ def test_check_string_length_noname(self):
+ self.assertIsNone(utils.check_string_length(
+ 'test', max_length=255))
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 11, max_length=255)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ '', min_length=1)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 'a' * 256, max_length=255)
+
+
+class ValidateIntegerTestCase(test.NoDBTestCase):
+ def test_valid_inputs(self):
+ self.assertEqual(
+ utils.validate_integer(42, "answer"), 42)
+ self.assertEqual(
+ utils.validate_integer("42", "answer"), 42)
+ self.assertEqual(
+ utils.validate_integer(
+ "7", "lucky", min_value=7, max_value=8), 7)
+ self.assertEqual(
+ utils.validate_integer(
+ 7, "lucky", min_value=6, max_value=7), 7)
+ self.assertEqual(
+ utils.validate_integer(
+ 300, "Spartaaa!!!", min_value=300), 300)
+ self.assertEqual(
+ utils.validate_integer(
+ "300", "Spartaaa!!!", max_value=300), 300)
+
+ def test_invalid_inputs(self):
+ self.assertRaises(exception.InvalidInput,
+ utils.validate_integer,
+ "im-not-an-int", "not-an-int")
+ self.assertRaises(exception.InvalidInput,
+ utils.validate_integer,
+ 3.14, "Pie")
+ self.assertRaises(exception.InvalidInput,
+ utils.validate_integer,
+ "299", "Sparta no-show",
+ min_value=300, max_value=300)
+ self.assertRaises(exception.InvalidInput,
+ utils.validate_integer,
+ 55, "doing 55 in a 54",
+ max_value=54)
+ self.assertRaises(exception.InvalidInput,
+ utils.validate_integer,
+ unichr(129), "UnicodeError",
+ max_value=1000)
+
+
+class ValidateNeutronConfiguration(test.NoDBTestCase):
+ def test_nova_network(self):
+ self.assertFalse(utils.is_neutron())
+
+ def test_neutron(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ self.assertTrue(utils.is_neutron())
+
+ def test_quantum(self):
+ self.flags(network_api_class='nova.network.quantumv2.api.API')
+ self.assertTrue(utils.is_neutron())
+
+
+class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
+ def test_is_auto_disk_config_disabled(self):
+ self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))
+
+ def test_is_auto_disk_config_disabled_none(self):
+ self.assertFalse(utils.is_auto_disk_config_disabled(None))
+
+ def test_is_auto_disk_config_disabled_false(self):
+ self.assertFalse(utils.is_auto_disk_config_disabled("false"))
+
+
+class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
+ def get_image(self):
+ image_meta = {
+ "id": "fake-image",
+ "name": "fake-name",
+ "min_ram": 1,
+ "min_disk": 1,
+ "disk_format": "raw",
+ "container_format": "bare",
+ }
+
+ return image_meta
+
+ def get_flavor(self):
+ flavor = {
+ "id": "fake.flavor",
+ "root_gb": 10,
+ }
+
+ return flavor
+
+ def test_base_image_properties(self):
+ image = self.get_image()
+
+ # Verify that we inherit all the needed keys
+ sys_meta = utils.get_system_metadata_from_image(image)
+ for key in utils.SM_INHERITABLE_KEYS:
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ self.assertEqual(image[key], sys_meta.get(sys_key))
+
+ # Verify that everything else is ignored
+ self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))
+
+ def test_inherit_image_properties(self):
+ image = self.get_image()
+ image["properties"] = {"foo1": "bar", "foo2": "baz"}
+
+ sys_meta = utils.get_system_metadata_from_image(image)
+
+ # Verify that we inherit all the image properties
+ for key, expected in image["properties"].iteritems():
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ self.assertEqual(sys_meta[sys_key], expected)
+
+ def test_vhd_min_disk_image(self):
+ image = self.get_image()
+ flavor = self.get_flavor()
+
+ image["disk_format"] = "vhd"
+
+ sys_meta = utils.get_system_metadata_from_image(image, flavor)
+
+ # Verify that the min_disk property is taken from
+ # flavor's root_gb when using vhd disk format
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
+ self.assertEqual(sys_meta[sys_key], flavor["root_gb"])
+
+ def test_dont_inherit_empty_values(self):
+ image = self.get_image()
+
+ for key in utils.SM_INHERITABLE_KEYS:
+ image[key] = None
+
+ sys_meta = utils.get_system_metadata_from_image(image)
+
+ # Verify that the empty properties have not been inherited
+ for key in utils.SM_INHERITABLE_KEYS:
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ self.assertNotIn(sys_key, sys_meta)
+
+
+class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
+ def get_system_metadata(self):
+ sys_meta = {
+ "image_min_ram": 1,
+ "image_min_disk": 1,
+ "image_disk_format": "raw",
+ "image_container_format": "bare",
+ }
+
+ return sys_meta
+
+ def test_image_from_system_metadata(self):
+ sys_meta = self.get_system_metadata()
+ sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
+ sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
+
+ image = utils.get_image_from_system_metadata(sys_meta)
+
+ # Verify that we inherit all the needed keys
+ for key in utils.SM_INHERITABLE_KEYS:
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ self.assertEqual(image[key], sys_meta.get(sys_key))
+
+ # Verify that we inherit the rest of metadata as properties
+ self.assertIn("properties", image)
+
+ for key, value in image["properties"].iteritems():
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ self.assertEqual(image["properties"][key], sys_meta[sys_key])
+
+ def test_dont_inherit_empty_values(self):
+ sys_meta = self.get_system_metadata()
+
+ for key in utils.SM_INHERITABLE_KEYS:
+ sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
+ sys_meta[sys_key] = None
+
+ image = utils.get_image_from_system_metadata(sys_meta)
+
+ # Verify that the empty properties have not been inherited
+ for key in utils.SM_INHERITABLE_KEYS:
+ self.assertNotIn(key, image)
+
+ def test_non_inheritable_image_properties(self):
+ sys_meta = self.get_system_metadata()
+ sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
+
+ self.flags(non_inheritable_image_properties=["foo1"])
+
+ image = utils.get_image_from_system_metadata(sys_meta)
+
+ # Verify that the foo1 key has not been inherited
+ self.assertNotIn("foo1", image)
+
+
+class VersionTestCase(test.NoDBTestCase):
+ def test_convert_version_to_int(self):
+ self.assertEqual(utils.convert_version_to_int('6.2.0'), 6002000)
+ self.assertEqual(utils.convert_version_to_int((6, 4, 3)), 6004003)
+ self.assertEqual(utils.convert_version_to_int((5, )), 5)
+ self.assertRaises(exception.NovaException,
+ utils.convert_version_to_int, '5a.6b')
+
+ def test_convert_version_to_string(self):
+ self.assertEqual(utils.convert_version_to_str(6007000), '6.7.0')
+ self.assertEqual(utils.convert_version_to_str(4), '4')
+
+ def test_convert_version_to_tuple(self):
+ self.assertEqual(utils.convert_version_to_tuple('6.7.0'), (6, 7, 0))
+
+
+class ConstantTimeCompareTestCase(test.NoDBTestCase):
+ def test_constant_time_compare(self):
+ self.assertTrue(utils.constant_time_compare("abcd1234", "abcd1234"))
+ self.assertFalse(utils.constant_time_compare("abcd1234", "a"))
+ self.assertFalse(utils.constant_time_compare("abcd1234", "ABCD234"))
diff --git a/nova/tests/unit/test_versions.py b/nova/tests/unit/test_versions.py
new file mode 100644
index 0000000000..06baca8b05
--- /dev/null
+++ b/nova/tests/unit/test_versions.py
@@ -0,0 +1,60 @@
+# Copyright 2011 Ken Pepple
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import StringIO
+
+from oslo.config import cfg
+
+from nova import test
+from nova import version
+
+
+class VersionTestCase(test.NoDBTestCase):
+ """Test cases for Versions code."""
+
+ def test_version_string_with_package_is_good(self):
+ """Ensure uninstalled code get version string."""
+
+ self.stubs.Set(version.version_info, 'version', '5.5.5.5')
+ self.stubs.Set(version, 'NOVA_PACKAGE', 'g9ec3421')
+ self.assertEqual("5.5.5.5-g9ec3421",
+ version.version_string_with_package())
+
+ def test_release_file(self):
+ version.loaded = False
+ real_open = __builtin__.open
+ real_find_file = cfg.CONF.find_file
+
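+ # Stub out config-file lookup and open() so a fake /etc/nova/release is
+ # read instead of touching the real filesystem.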
+ def fake_find_file(self, name):
+ if name == "release":
+ return "/etc/nova/release"
+ return real_find_file(self, name)
+
+ def fake_open(path, *args, **kwargs):
+ if path == "/etc/nova/release":
+ data = """[Nova]
+vendor = ACME Corporation
+product = ACME Nova
+package = 1337"""
+ return StringIO.StringIO(data)
+
+ return real_open(path, *args, **kwargs)
+
+ self.stubs.Set(__builtin__, 'open', fake_open)
+ self.stubs.Set(cfg.ConfigOpts, 'find_file', fake_find_file)
+
+ self.assertEqual(version.vendor_string(), "ACME Corporation")
+ self.assertEqual(version.product_string(), "ACME Nova")
+ self.assertEqual(version.package_string(), "1337")
diff --git a/nova/tests/unit/test_weights.py b/nova/tests/unit/test_weights.py
new file mode 100644
index 0000000000..d6804037a7
--- /dev/null
+++ b/nova/tests/unit/test_weights.py
@@ -0,0 +1,53 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For weights.
+"""
+
+from nova import test
+from nova import weights
+
+
+class TestWeigher(test.NoDBTestCase):
+ def test_no_multiplier(self):
+ class FakeWeigher(weights.BaseWeigher):
+ def _weigh_object(self, *args, **kwargs):
+ pass
+
+ self.assertEqual(1.0,
+ FakeWeigher().weight_multiplier())
+
+ def test_no_weight_object(self):
+ class FakeWeigher(weights.BaseWeigher):
+ def weight_multiplier(self, *args, **kwargs):
+ pass
+ self.assertRaises(TypeError,
+ FakeWeigher)
+
+ def test_normalization(self):
+ # weight_list, expected_result, minval, maxval
+ map_ = (
+ ((), (), None, None),
+ ((0.0, 0.0), (0.0, 0.0), None, None),
+ ((1.0, 1.0), (0.0, 0.0), None, None),
+
+ ((20.0, 50.0), (0.0, 1.0), None, None),
+ ((20.0, 50.0), (0.0, 0.375), None, 100.0),
+ ((20.0, 50.0), (0.4, 1.0), 0.0, None),
+ ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0),
+ )
+ for seq, result, minval, maxval in map_:
+ ret = weights.normalize(seq, minval=minval, maxval=maxval)
+ self.assertEqual(tuple(ret), result)
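
The expectation table in test_normalization implies plain min-max scaling: each weight maps to (w - minval) / (maxval - minval), the bounds default to the observed minimum and maximum, and everything collapses to 0.0 when the range is empty. A standalone sketch consistent with those expectations (assumed here; the real nova.weights.normalize may be implemented differently):

    def normalize_sketch(weight_list, minval=None, maxval=None):
        if not weight_list:
            return ()
        if maxval is None:
            maxval = max(weight_list)
        if minval is None:
            minval = min(weight_list)
        range_ = maxval - minval
        if range_ == 0:
            # Degenerate case: all weights equal, or minval == maxval.
            return [0.0] * len(weight_list)
        return [(w - minval) / float(range_) for w in weight_list]

    # Matches the table above: explicit bounds 0.0..100.0 give 0.2 and 0.5.
    assert tuple(normalize_sketch((20.0, 50.0), 0.0, 100.0)) == (0.2, 0.5)
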
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
new file mode 100644
index 0000000000..0a08a7651f
--- /dev/null
+++ b/nova/tests/unit/test_wsgi.py
@@ -0,0 +1,263 @@
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for `nova.wsgi`."""
+
+import os.path
+import tempfile
+import urllib2
+
+import eventlet
+import eventlet.wsgi
+import mock
+from oslo.config import cfg
+import requests
+import testtools
+import webob
+
+import nova.exception
+from nova import test
+from nova.tests.unit import utils
+import nova.wsgi
+
+SSL_CERT_DIR = os.path.normpath(os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ 'ssl_cert'))
+CONF = cfg.CONF
+
+
+class TestLoaderNothingExists(test.NoDBTestCase):
+ """Loader tests where os.path.exists always returns False."""
+
+ def setUp(self):
+ super(TestLoaderNothingExists, self).setUp()
+ self.stubs.Set(os.path, 'exists', lambda _: False)
+
+ def test_relpath_config_not_found(self):
+ self.flags(api_paste_config='api-paste.ini')
+ self.assertRaises(
+ nova.exception.ConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+ def test_abspath_config_not_found(self):
+ self.flags(api_paste_config='/etc/nova/api-paste.ini')
+ self.assertRaises(
+ nova.exception.ConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+
+class TestLoaderNormalFilesystem(test.NoDBTestCase):
+ """Loader tests with normal filesystem (unmodified os.path module)."""
+
+ _paste_config = """
+[app:test_app]
+use = egg:Paste#static
+document_root = /tmp
+ """
+
+ def setUp(self):
+ super(TestLoaderNormalFilesystem, self).setUp()
+ self.config = tempfile.NamedTemporaryFile(mode="w+t")
+ self.config.write(self._paste_config.lstrip())
+ self.config.seek(0)
+ self.config.flush()
+ self.loader = nova.wsgi.Loader(self.config.name)
+
+ def test_config_found(self):
+ self.assertEqual(self.config.name, self.loader.config_path)
+
+ def test_app_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteAppNotFound,
+ self.loader.load_app,
+ "nonexistent app",
+ )
+
+ def test_app_found(self):
+ url_parser = self.loader.load_app("test_app")
+ self.assertEqual("/tmp", url_parser.directory)
+
+ def tearDown(self):
+ self.config.close()
+ super(TestLoaderNormalFilesystem, self).tearDown()
+
+
+class TestWSGIServer(test.NoDBTestCase):
+ """WSGI server tests."""
+
+ def test_no_app(self):
+ server = nova.wsgi.Server("test_app", None)
+ self.assertEqual("test_app", server.name)
+
+ def test_custom_max_header_line(self):
+ self.flags(max_header_line=4096) # Default value is 16384.
+ nova.wsgi.Server("test_custom_max_header_line", None)
+ self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE)
+
+ def test_start_random_port(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="127.0.0.1", port=0)
+ server.start()
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
+
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
+ def test_start_random_port_with_ipv6(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="::1", port=0)
+ server.start()
+ self.assertEqual("::1", server.host)
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
+
+ def test_server_pool_waitall(self):
+ # Test that the pool's waitall() method is called while stopping the server
+ server = nova.wsgi.Server("test_server", None,
+ host="127.0.0.1", port=4444)
+ server.start()
+ with mock.patch.object(server._pool,
+ 'waitall') as mock_waitall:
+ server.stop()
+ server.wait()
+ mock_waitall.assert_called_once_with()
+
+ def test_uri_length_limit(self):
+ server = nova.wsgi.Server("test_uri_length_limit", None,
+ host="127.0.0.1", max_url_len=16384)
+ server.start()
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
+ resp = requests.get(uri, proxies={"http": ""})
+ eventlet.sleep(0)
+ self.assertNotEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
+ resp = requests.get(uri, proxies={"http": ""})
+ eventlet.sleep(0)
+ self.assertEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
+ server.stop()
+ server.wait()
+
+ def test_reset_pool_size_to_default(self):
+ server = nova.wsgi.Server("test_resize", None,
+ host="127.0.0.1", max_url_len=16384)
+ server.start()
+
+ # Stopping the server, which in turn sets pool size to 0
+ server.stop()
+ self.assertEqual(server._pool.size, 0)
+
+ # Resetting pool size to default
+ server.reset()
+ server.start()
+ self.assertEqual(server._pool.size, CONF.wsgi_default_pool_size)
+
+
+class TestWSGIServerWithSSL(test.NoDBTestCase):
+ """WSGI server with SSL tests."""
+
+ def setUp(self):
+ super(TestWSGIServerWithSSL, self).setUp()
+ self.flags(enabled_ssl_apis=['fake_ssl'],
+ ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
+ ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
+
+ def test_ssl_server(self):
+
+ def test_app(env, start_response):
+ start_response('200 OK', {})
+ return ['PONG']
+
+ fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
+ host="127.0.0.1", port=0,
+ use_ssl=True)
+ fake_ssl_server.start()
+ self.assertNotEqual(0, fake_ssl_server.port)
+
+ cli = eventlet.connect(("localhost", fake_ssl_server.port))
+ cli = eventlet.wrap_ssl(cli,
+ ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
+
+ cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.read(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ fake_ssl_server.stop()
+ fake_ssl_server.wait()
+
+ def test_two_servers(self):
+
+ def test_app(env, start_response):
+ start_response('200 OK', {})
+ return ['PONG']
+
+ fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
+ host="127.0.0.1", port=0, use_ssl=True)
+ fake_ssl_server.start()
+ self.assertNotEqual(0, fake_ssl_server.port)
+
+ fake_server = nova.wsgi.Server("fake", test_app,
+ host="127.0.0.1", port=0)
+ fake_server.start()
+ self.assertNotEqual(0, fake_server.port)
+
+ cli = eventlet.connect(("localhost", fake_ssl_server.port))
+ cli = eventlet.wrap_ssl(cli,
+ ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
+
+ cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.read(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ cli = eventlet.connect(("localhost", fake_server.port))
+
+ cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.recv(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ fake_ssl_server.stop()
+ fake_ssl_server.wait()
+
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
+ def test_app_using_ipv6_and_ssl(self):
+ greetings = 'Hello, World!!!'
+
+ @webob.dec.wsgify
+ def hello_world(req):
+ return greetings
+
+ server = nova.wsgi.Server("fake_ssl",
+ hello_world,
+ host="::1",
+ port=0,
+ use_ssl=True)
+
+ server.start()
+
+ response = urllib2.urlopen('https://[::1]:%d/' % server.port)
+ self.assertEqual(greetings, response.read())
+
+ server.stop()
+ server.wait()
diff --git a/nova/tests/unit/utils.py b/nova/tests/unit/utils.py
new file mode 100644
index 0000000000..58d0825587
--- /dev/null
+++ b/nova/tests/unit/utils.py
@@ -0,0 +1,217 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import platform
+import socket
+import sys
+
+from oslo.config import cfg
+
+from nova.compute import flavors
+import nova.context
+import nova.db
+from nova import exception
+from nova.image import glance
+from nova.network import minidns
+from nova.network import model as network_model
+from nova import objects
+import nova.utils
+
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+def get_test_admin_context():
+ return nova.context.get_admin_context()
+
+
+def get_test_image_info(context, instance_ref):
+ if not context:
+ context = get_test_admin_context()
+
+ image_ref = instance_ref['image_ref']
+ image_service, image_id = glance.get_remote_image_service(context,
+ image_ref)
+ return image_service.show(context, image_id)
+
+
+def get_test_flavor(context=None, options=None):
+ options = options or {}
+ if not context:
+ context = get_test_admin_context()
+
+ test_flavor = {'name': 'kinda.big',
+ 'flavorid': 'someid',
+ 'memory_mb': 2048,
+ 'vcpus': 4,
+ 'root_gb': 40,
+ 'ephemeral_gb': 80,
+ 'swap': 1024}
+
+ test_flavor.update(options)
+
+ try:
+ flavor_ref = nova.db.flavor_create(context, test_flavor)
+ except (exception.FlavorExists, exception.FlavorIdExists):
+ flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big')
+ return flavor_ref
+
+
+def get_test_instance(context=None, flavor=None, obj=False):
+ if not context:
+ context = get_test_admin_context()
+
+ if not flavor:
+ flavor = get_test_flavor(context)
+
+ metadata = {}
+ flavors.save_flavor_info(metadata, flavor, '')
+
+ test_instance = {'memory_kb': '2048000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 4,
+ 'root_gb': 40,
+ 'bridge': 'br101',
+ 'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'instance_type_id': '5',
+ 'system_metadata': metadata,
+ 'extra_specs': {},
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ }
+
+ if obj:
+ instance = objects.Instance(context, **test_instance)
+ instance.create()
+ else:
+ instance = nova.db.instance_create(context, test_instance)
+ return instance
+
+
+def get_test_network_info(count=1):
+ ipv6 = CONF.use_ipv6
+ fake = 'fake'
+ fake_ip = '0.0.0.0'
+ fake_vlan = 100
+ fake_bridge_interface = 'eth0'
+
+ def current():
+ subnet_4 = network_model.Subnet(cidr=fake_ip,
+ dns=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ gateway=network_model.IP(fake_ip),
+ ips=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ routes=None,
+ dhcp_server=fake_ip)
+ subnet_6 = network_model.Subnet(cidr=fake_ip,
+ gateway=network_model.IP(fake_ip),
+ ips=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ routes=None,
+ version=6)
+ subnets = [subnet_4]
+ if ipv6:
+ subnets.append(subnet_6)
+ network = network_model.Network(id=None,
+ bridge=fake,
+ label=None,
+ subnets=subnets,
+ vlan=fake_vlan,
+ bridge_interface=fake_bridge_interface,
+ injected=False)
+ vif = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address=fake,
+ network=network,
+ type=network_model.VIF_TYPE_BRIDGE,
+ devname=None,
+ ovs_interfaceid=None)
+
+ return vif
+
+ return network_model.NetworkInfo([current() for x in xrange(0, count)])
+
+
+def is_osx():
+ return platform.mac_ver()[0] != ''
+
+
+def coreutils_readlink_available():
+ _out, err = nova.utils.trycmd('readlink', '-nm', '/')
+ return err == ''
+
+
+test_dns_managers = []
+
+
+def dns_manager():
+ global test_dns_managers
+ manager = minidns.MiniDNS()
+ test_dns_managers.append(manager)
+ return manager
+
+
+def cleanup_dns_managers():
+ global test_dns_managers
+ for manager in test_dns_managers:
+ manager.delete_dns_file()
+ test_dns_managers = []
+
+
+def killer_xml_body():
+ return (("""<!DOCTYPE x [
+ <!ENTITY a "%(a)s">
+ <!ENTITY b "%(b)s">
+ <!ENTITY c "%(c)s">]>
+ <foo>
+ <bar>
+ <v1>%(d)s</v1>
+ </bar>
+ </foo>""") % {
+ 'a': 'A' * 10,
+ 'b': '&a;' * 10,
+ 'c': '&b;' * 10,
+ 'd': '&c;' * 9999,
+ }).strip()
+
+
+def is_ipv6_supported():
+ has_ipv6_support = socket.has_ipv6
+ try:
+ s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ s.close()
+ except socket.error as e:
+ if e.errno == errno.EAFNOSUPPORT:
+ has_ipv6_support = False
+ else:
+ raise
+
+ # check if there is at least one interface with ipv6
+ if has_ipv6_support and sys.platform.startswith('linux'):
+ try:
+ with open('/proc/net/if_inet6') as f:
+ if not f.read():
+ has_ipv6_support = False
+ except IOError:
+ has_ipv6_support = False
+
+ return has_ipv6_support
+
+
+def get_api_version(request):
+ if request.path[2:3].isdigit():
+ return int(request.path[2:3])
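
killer_xml_body() above builds a classic nested-entity ("billion laughs" style) payload: a few hundred bytes of XML whose entities expand to roughly ten million characters, which is exactly what a parser hardened against entity expansion must refuse to do. The arithmetic, spelled out:

    # Each level multiplies the previous one by its repeat count.
    a = 10           # entity a: ten literal 'A' characters
    b = 10 * a       # entity b: '&a;' repeated 10 times  -> 100 chars
    c = 10 * b       # entity c: '&b;' repeated 10 times  -> 1,000 chars
    d = 9999 * c     # body:     '&c;' repeated 9999 times
    assert d == 9999000   # ~10 MB of expanded text from a tiny document
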
diff --git a/nova/tests/unit/virt/__init__.py b/nova/tests/unit/virt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/__init__.py
diff --git a/nova/tests/unit/virt/disk/__init__.py b/nova/tests/unit/virt/disk/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/__init__.py
diff --git a/nova/tests/unit/virt/disk/mount/__init__.py b/nova/tests/unit/virt/disk/mount/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/__init__.py
diff --git a/nova/tests/unit/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
new file mode 100644
index 0000000000..6375c9386b
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
@@ -0,0 +1,98 @@
+# Copyright 2012 Michael Still
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import loop
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+def _fake_trycmd_losetup_works(*args, **kwargs):
+ return '/dev/loop0', ''
+
+
+def _fake_trycmd_losetup_fails(*args, **kwargs):
+ return '', 'doh'
+
+
+class LoopTestCase(test.NoDBTestCase):
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_works))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(l.get_dev())
+ self.assertTrue(l.linked)
+ self.assertEqual('', l.error)
+ self.assertEqual('/dev/loop0', l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertEqual('', l.error)
+ self.assertIsNone(l.device)
+
+ def test_inner_get_dev_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+
+ # Error logged, no device consumed
+ self.assertFalse(l._inner_get_dev())
+ self.assertFalse(l.linked)
+ self.assertNotEqual('', l.error)
+ self.assertIsNone(l.device)
+
+ # Free
+ l.unget_dev()
+ self.assertFalse(l.linked)
+ self.assertIsNone(l.device)
+
+ def test_get_dev_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ _fake_trycmd_losetup_fails))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # Always fail to get a device
+ def fake_get_dev_fails():
+ return False
+ l._inner_get_dev = fake_get_dev_fails
+
+ # Fail to get a device
+ self.assertFalse(l.get_dev())
+
+ def test_unget_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ l = loop.LoopMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
+ _fake_noop))
+
+ # This just checks that a free of something we don't have doesn't
+ # throw an exception
+ l.unget_dev()
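
Both the loop and nbd mount tests lean on fixtures.MonkeyPatch, which replaces a dotted attribute for the duration of one test and restores the original during cleanup. A minimal standalone example of the pattern (hypothetical patch target; any module-level attribute works the same way):

    import os

    import fixtures
    import testtools


    class MonkeyPatchExample(testtools.TestCase):
        def test_patch_is_reverted_after_the_test(self):
            # os.getcwd is replaced only while this test runs; the fixture
            # restores the real implementation at cleanup time.
            self.useFixture(fixtures.MonkeyPatch('os.getcwd',
                                                 lambda: '/patched'))
            self.assertEqual('/patched', os.getcwd())
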
diff --git a/nova/tests/unit/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
new file mode 100644
index 0000000000..d048511d16
--- /dev/null
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
@@ -0,0 +1,331 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+import time
+
+import eventlet
+import fixtures
+
+from nova import test
+from nova.virt.disk.mount import nbd
+
+ORIG_EXISTS = os.path.exists
+ORIG_LISTDIR = os.listdir
+
+
+def _fake_exists_no_users(path):
+ if path.startswith('/sys/block/nbd'):
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_listdir_nbd_devices(path):
+ if path.startswith('/sys/block'):
+ return ['nbd0', 'nbd1']
+ return ORIG_LISTDIR(path)
+
+
+def _fake_exists_all_used(path):
+ if path.startswith('/sys/block/nbd'):
+ return True
+ return ORIG_EXISTS(path)
+
+
+def _fake_detect_nbd_devices_none(self):
+ return []
+
+
+def _fake_detect_nbd_devices(self):
+ return ['nbd0', 'nbd1']
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class NbdTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(NbdTestCase, self).setUp()
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices)
+ self.useFixture(fixtures.MonkeyPatch('os.listdir',
+ _fake_listdir_nbd_devices))
+
+ def test_nbd_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
+ n = nbd.NbdMount(None, tempdir)
+ self.assertIsNone(n._allocate_nbd())
+
+ def test_nbd_no_free_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_all_used))
+ self.assertIsNone(n._allocate_nbd())
+
+ def test_nbd_not_loaded(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ return False
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # This should fail, as we don't have the module "loaded"
+ # TODO(mikal): work out how to force english as the gettext language
+ # so that the error check always passes
+ self.assertIsNone(n._allocate_nbd())
+ self.assertEqual('nbd unavailable: module not loaded', n.error)
+
+ def test_nbd_allocation(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Allocate a nbd device
+ self.assertEqual('/dev/nbd0', n._allocate_nbd())
+
+ def test_nbd_allocation_one_in_use(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+
+ # Fake out os.path.exists
+ def fake_exists(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
+
+ # Allocate an nbd device; it should not be the one already in use
+ # TODO(mikal): Note that there is a leak here, as the in use nbd device
+ # is removed from the list, but not returned so it will never be
+ # re-added. I will fix this in a later patch.
+ self.assertEqual('/dev/nbd1', n._allocate_nbd())
+
+ def test_inner_get_dev_no_devices(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ self.stubs.Set(nbd.NbdMount, '_detect_nbd_devices',
+ _fake_detect_nbd_devices_none)
+ n = nbd.NbdMount(None, tempdir)
+ self.assertFalse(n._inner_get_dev())
+
+ def test_inner_get_dev_qemu_fails(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always fails
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.startswith('qemu-nbd error'))
+
+ def test_inner_get_dev_qemu_timeout(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ _fake_exists_no_users))
+
+ # We have a trycmd that always passes
+ def fake_trycmd(*args, **kwargs):
+ return '', ''
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+
+ # Error logged, no device consumed
+ self.assertFalse(n._inner_get_dev())
+ self.assertTrue(n.error.endswith('did not show up'))
+
+ def fake_exists_one(self, path):
+ # The pid file for the allocated device needs to exist, but only
+ # once the device has actually been allocated to us
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd1/pid':
+ return False
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+
+ def fake_trycmd_creates_pid(self, *args, **kwargs):
+ def fake_exists_two(path):
+ if path.startswith('/sys/block/nbd'):
+ if path == '/sys/block/nbd0/pid':
+ return True
+ if path.endswith('pid'):
+ return False
+ return True
+ return ORIG_EXISTS(path)
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ fake_exists_two))
+ return '', ''
+
+ def test_inner_get_dev_works(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+
+ # No error logged, device consumed
+ self.assertTrue(n._inner_get_dev())
+ self.assertTrue(n.linked)
+ self.assertEqual('', n.error)
+ self.assertEqual('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEqual('', n.error)
+ self.assertIsNone(n.device)
+
+ def test_unget_dev_simple(self):
+ # This test is just checking we don't get an exception when we unget
+ # something we don't have
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ n.unget_dev()
+
+ def test_get_dev(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+
+ # No error logged, device consumed
+ self.assertTrue(n.get_dev())
+ self.assertTrue(n.linked)
+ self.assertEqual('', n.error)
+ self.assertEqual('/dev/nbd0', n.device)
+
+ # Free
+ n.unget_dev()
+ self.assertFalse(n.linked)
+ self.assertEqual('', n.error)
+ self.assertIsNone(n.device)
+
+ def test_get_dev_timeout(self):
+ # Always fail to get a device
+ def fake_get_dev_fails(self):
+ return False
+ self.stubs.Set(nbd.NbdMount, '_inner_get_dev', fake_get_dev_fails)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ n = nbd.NbdMount(None, tempdir)
+ self.useFixture(fixtures.MonkeyPatch('random.shuffle', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('time.sleep', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.execute', _fake_noop))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ self.fake_exists_one))
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ self.fake_trycmd_creates_pid))
+ self.useFixture(fixtures.MonkeyPatch(('nova.virt.disk.mount.api.'
+ 'MAX_DEVICE_WAIT'), -10))
+
+ # Allocation never succeeds, so get_dev() gives up after the timeout
+ self.assertFalse(n.get_dev())
+
+ def test_do_mount_need_to_specify_fs_type(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # communicate a failed mount properly.
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ mount = nbd.NbdMount(imgfile.name, tempdir)
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ mount.get_dev = fake_returns_true
+ mount.map_dev = fake_returns_true
+
+ self.assertFalse(mount.do_mount())
+
+ def test_device_creation_race(self):
+ # Make sure that even if two threads create instances at the same time
+ # they cannot choose the same nbd number (see bug 1207422)
+
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ free_devices = _fake_detect_nbd_devices(None)[:]
+ chosen_devices = []
+
+ def fake_find_unused(self):
+ return os.path.join('/dev', free_devices[-1])
+
+ def delay_and_remove_device(*args, **kwargs):
+ # Ensure that a context switch happens before the device is marked
+ # as used. Without nbd-allocation-lock in place, this causes a
+ # failure.
+ time.sleep(0.1)
+
+ # We always choose the top device in find_unused - remove it now.
+ free_devices.pop()
+
+ return '', ''
+
+ def pid_exists(pidfile):
+ return pidfile not in [os.path.join('/sys/block', dev, 'pid')
+ for dev in free_devices]
+
+ self.stubs.Set(nbd.NbdMount, '_allocate_nbd', fake_find_unused)
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd',
+ delay_and_remove_device))
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists',
+ pid_exists))
+
+ def get_a_device():
+ n = nbd.NbdMount(None, tempdir)
+ n.get_dev()
+ chosen_devices.append(n.device)
+
+ thread1 = eventlet.spawn(get_a_device)
+ thread2 = eventlet.spawn(get_a_device)
+ thread1.wait()
+ thread2.wait()
+
+ self.assertEqual(2, len(chosen_devices))
+ self.assertNotEqual(chosen_devices[0], chosen_devices[1])
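
test_device_creation_race drives two allocations through eventlet greenthreads and checks that they never pick the same device. For readers unfamiliar with that API, a minimal spawn/wait illustration (independent of nbd):

    import eventlet

    results = []

    def worker(name):
        # eventlet.sleep(0) yields to the other greenthread, forcing an
        # interleaving similar to the one the race test provokes.
        eventlet.sleep(0)
        results.append(name)

    t1 = eventlet.spawn(worker, 'first')
    t2 = eventlet.spawn(worker, 'second')
    t1.wait()
    t2.wait()
    assert sorted(results) == ['first', 'second']
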
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
new file mode 100644
index 0000000000..1f62c33b51
--- /dev/null
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -0,0 +1,153 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempfile
+
+import fixtures
+from oslo.concurrency import processutils
+
+from nova import test
+from nova import utils
+from nova.virt.disk import api
+from nova.virt.disk.mount import api as mount
+
+
+class FakeMount(object):
+ device = None
+
+ @staticmethod
+ def instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return FakeMount()
+
+ def get_dev(self):
+ pass
+
+ def unget_dev(self):
+ pass
+
+
+class APITestCase(test.NoDBTestCase):
+ def test_can_resize_need_fs_type_specified(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # treat a failure to mount as a failure to be able to resize the
+ # filesystem
+ def _fake_get_disk_size(path):
+ return 10
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))
+
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.get_dev',
+ fake_returns_true))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.map_dev',
+ fake_returns_true))
+
+ # Force the use of localfs, which is what was used during the failure
+ # reported in the bug
+ def fake_import_fails(*args, **kwargs):
+ raise Exception('Failed')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'oslo.utils.import_module',
+ fake_import_fails))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ self.assertFalse(api.is_image_partitionless(imgfile, use_cow=True))
+
+ def test_resize2fs_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('e2fsck',
+ '-fp',
+ imgfile,
+ check_exit_code=[0, 1, 2],
+ run_as_root=False)
+ utils.execute('resize2fs',
+ imgfile,
+ check_exit_code=False,
+ run_as_root=False)
+
+ self.mox.ReplayAll()
+ api.resize2fs(imgfile)
+
+ def test_resize2fs_e2fsck_fails(self):
+ imgfile = tempfile.NamedTemporaryFile()
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('e2fsck',
+ '-fp',
+ imgfile,
+ check_exit_code=[0, 1, 2],
+ run_as_root=False).AndRaise(
+ processutils.ProcessExecutionError("fs error"))
+ self.mox.ReplayAll()
+ api.resize2fs(imgfile)
+
+ def test_extend_qcow_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+ imgsize = 10
+ device = "/dev/sdh"
+ use_cow = True
+
+ self.flags(resize_fs_using_block_device=True)
+ mounter = FakeMount.instance_for_format(
+ imgfile, None, None, 'qcow2')
+ mounter.device = device
+
+ self.mox.StubOutWithMock(api, 'can_resize_image')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(api, 'is_image_partitionless')
+ self.mox.StubOutWithMock(mounter, 'get_dev')
+ self.mox.StubOutWithMock(mounter, 'unget_dev')
+ self.mox.StubOutWithMock(api, 'resize2fs')
+ self.mox.StubOutWithMock(mount.Mount, 'instance_for_format')
+
+ api.can_resize_image(imgfile, imgsize).AndReturn(True)
+ utils.execute('qemu-img', 'resize', imgfile, imgsize)
+ api.is_image_partitionless(imgfile, use_cow).AndReturn(True)
+ mount.Mount.instance_for_format(
+ imgfile, None, None, 'qcow2').AndReturn(mounter)
+ mounter.get_dev().AndReturn(True)
+ api.resize2fs(mounter.device, run_as_root=True, check_exit_code=[0])
+ mounter.unget_dev()
+
+ self.mox.ReplayAll()
+ api.extend(imgfile, imgsize, use_cow=use_cow)
+
+ def test_extend_raw_success(self):
+ imgfile = tempfile.NamedTemporaryFile()
+ imgsize = 10
+ use_cow = False
+
+ self.mox.StubOutWithMock(api, 'can_resize_image')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(api, 'is_image_partitionless')
+ self.mox.StubOutWithMock(api, 'resize2fs')
+
+ api.can_resize_image(imgfile, imgsize).AndReturn(True)
+ utils.execute('qemu-img', 'resize', imgfile, imgsize)
+ api.is_image_partitionless(imgfile, use_cow).AndReturn(True)
+ api.resize2fs(imgfile, run_as_root=False, check_exit_code=[0])
+
+ self.mox.ReplayAll()
+ api.extend(imgfile, imgsize, use_cow=use_cow)
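
The disk API tests above follow the classic mox record/replay/verify cycle: stub out collaborators with StubOutWithMock, record the calls you expect (AndReturn / AndRaise), call ReplayAll(), exercise the code, and let verification confirm every recorded call happened. A compact self-contained sketch of that cycle using made-up classes (the mox library itself is real; on Python 3 the mox3 package provides the same API):

    import mox


    class Calculator(object):
        def add(self, a, b):
            return a + b


    def double_sum(calc, a, b):
        return 2 * calc.add(a, b)


    m = mox.Mox()
    calc = Calculator()
    m.StubOutWithMock(calc, 'add')
    calc.add(2, 3).AndReturn(5)    # record the expected call
    m.ReplayAll()                  # switch from record to replay mode
    assert double_sum(calc, 2, 3) == 10
    m.VerifyAll()                  # every recorded call must have happened
    m.UnsetStubs()
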
diff --git a/nova/tests/unit/virt/disk/test_inject.py b/nova/tests/unit/virt/disk/test_inject.py
new file mode 100644
index 0000000000..97c8a08013
--- /dev/null
+++ b/nova/tests/unit/virt/disk/test_inject.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk import api as diskapi
+from nova.virt.disk.vfs import guestfs as vfsguestfs
+
+
+class VirtDiskTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsguestfs.guestfs = fakeguestfs
+
+ def test_inject_data(self):
+
+ self.assertTrue(diskapi.inject_data("/some/file", use_cow=True))
+
+ self.assertTrue(diskapi.inject_data("/some/file",
+ mandatory=('files',)))
+
+ self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey",
+ mandatory=('key',)))
+
+ os_name = os.name
+ os.name = 'nt' # Cause password injection to fail
+ self.assertRaises(exception.NovaException,
+ diskapi.inject_data,
+ "/some/file", admin_password="p",
+ mandatory=('admin_password',))
+ self.assertFalse(diskapi.inject_data("/some/file", admin_password="p"))
+ os.name = os_name
+
+ self.assertFalse(diskapi.inject_data("/some/fail/file",
+ key="mysshkey"))
+
+ def test_inject_data_key(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "Hello World#!/bin/sh\n# Added by " +
+ "Nova to ensure injected ssh keys " +
+ "have the right context\nrestorecon " +
+ "-RF root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux_append_with_newline(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "#!/bin/sh\necho done\n# Added "
+ "by Nova to ensure injected ssh keys have "
+ "the right context\nrestorecon -RF "
+ "root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+ vfs.teardown()
+
+ def test_inject_net(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_net_into_fs("mynetconfig", vfs)
+
+ self.assertIn("/etc/network/interfaces", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
+ {'content': 'mynetconfig',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_metadata(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_metadata_into_fs({"foo": "bar", "eek": "wizz"}, vfs)
+
+ self.assertIn("/meta.js", vfs.handle.files)
+ self.assertEqual({'content': '{"foo": "bar", ' +
+ '"eek": "wizz"}',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100},
+ vfs.handle.files["/meta.js"])
+ vfs.teardown()
+
+ def test_inject_admin_password(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ def fake_salt():
+ return "1234567890abcdef"
+
+ self.stubs.Set(diskapi, '_generate_salt', fake_salt)
+
+ vfs.handle.write("/etc/shadow",
+ "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n")
+
+ vfs.handle.write("/etc/passwd",
+ "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
+
+ diskapi._inject_admin_password_into_fs("123456", vfs)
+
+ self.assertEqual(vfs.handle.files["/etc/passwd"],
+ {'content': "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:" +
+ "/sbin/nologin\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ shadow = vfs.handle.files["/etc/shadow"]
+
+ # if the encrypted password is only 13 characters long, then
+ # nova.virt.disk.api:_set_password fell back to DES.
+ if len(shadow['content']) == 91:
+ self.assertEqual(shadow,
+ {'content': "root:12tir.zIbWQ3c" +
+ ":14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ else:
+ self.assertEqual(shadow,
+ {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
+ "vbFS88TEN0:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_files_into_fs(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_files_into_fs([("/path/to/not/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/not/exists", vfs.handle.files)
+ shadow_dir = vfs.handle.files["/path/to/not/exists"]
+ self.assertEqual(shadow_dir,
+ {"isdir": True,
+ "gid": 0,
+ "uid": 0,
+ "mode": 0o744})
+
+ shadow_file = vfs.handle.files["/path/to/not/exists/file"]
+ self.assertEqual(shadow_file,
+ {"isdir": False,
+ "content": "inject-file-contents",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700})
+ vfs.teardown()
+
+ def test_inject_files_into_fs_dir_exists(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ called = {'make_path': False}
+
+ def fake_has_file(*args, **kwargs):
+ return True
+
+ def fake_make_path(*args, **kwargs):
+ called['make_path'] = True
+
+ self.stubs.Set(vfs, 'has_file', fake_has_file)
+ self.stubs.Set(vfs, 'make_path', fake_make_path)
+
+ # test for a directory that already exists
+ diskapi._inject_files_into_fs([("/path/to/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/exists/file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for root dir
+ diskapi._inject_files_into_fs([("/inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for null dir
+ vfs.handle.files.pop("/inject-file")
+ diskapi._inject_files_into_fs([("inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ vfs.teardown()
diff --git a/nova/tests/unit/virt/disk/vfs/__init__.py b/nova/tests/unit/virt/disk/vfs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/__init__.py
diff --git a/nova/tests/unit/virt/disk/vfs/fakeguestfs.py b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
new file mode 100644
index 0000000000..5e5efa7a14
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
@@ -0,0 +1,188 @@
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+EVENT_APPLIANCE = 0x1
+EVENT_LIBRARY = 0x2
+EVENT_WARNING = 0x3
+EVENT_TRACE = 0x4
+
+
+class GuestFS(object):
+ SUPPORT_CLOSE_ON_EXIT = True
+ SUPPORT_RETURN_DICT = True
+
+ def __init__(self, **kwargs):
+ if not self.SUPPORT_CLOSE_ON_EXIT and 'close_on_exit' in kwargs:
+ raise TypeError('close_on_exit')
+ if not self.SUPPORT_RETURN_DICT and 'python_return_dict' in kwargs:
+ raise TypeError('python_return_dict')
+
+ self._python_return_dict = kwargs.get('python_return_dict', False)
+ self.kwargs = kwargs
+ self.drives = []
+ self.running = False
+ self.closed = False
+ self.mounts = []
+ self.files = {}
+ self.auginit = False
+ self.root_mounted = False
+ self.backend_settings = None
+ self.trace_enabled = False
+ self.verbose_enabled = False
+ self.event_callback = None
+
+ def launch(self):
+ self.running = True
+
+ def shutdown(self):
+ self.running = False
+ self.mounts = []
+ self.drives = []
+
+ def set_backend_settings(self, settings):
+ self.backend_settings = settings
+
+ def close(self):
+ self.closed = True
+
+ def add_drive_opts(self, file, *args, **kwargs):
+ if file == "/some/fail/file":
+ raise RuntimeError("%s: No such file or directory", file)
+
+ self.drives.append((file, kwargs['format']))
+
+ def add_drive(self, file, format=None, *args, **kwargs):
+ self.add_drive_opts(file, format=format, *args, **kwargs)
+
+ def inspect_os(self):
+ return ["/dev/guestvgf/lv_root"]
+
+ def inspect_get_mountpoints(self, dev):
+ mountpoints = [("/home", "/dev/mapper/guestvgf-lv_home"),
+ ("/", "/dev/mapper/guestvgf-lv_root"),
+ ("/boot", "/dev/vda1")]
+
+ if self.SUPPORT_RETURN_DICT and self._python_return_dict:
+ return dict(mountpoints)
+ else:
+ return mountpoints
+
+ def mount_options(self, options, device, mntpoint):
+ if mntpoint == "/":
+ self.root_mounted = True
+ else:
+ if not self.root_mounted:
+ raise RuntimeError(
+ "mount: %s: No such file or directory" % mntpoint)
+ self.mounts.append((options, device, mntpoint))
+
+ def mkdir_p(self, path):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": True,
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ def read_file(self, path):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ return self.files[path]["content"]
+
+ def write(self, path, content):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ self.files[path]["content"] = content
+
+ def write_append(self, path, content):
+ if path not in self.files:
+ self.files[path] = {
+ "isdir": False,
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+
+ self.files[path]["content"] = self.files[path]["content"] + content
+
+ def stat(self, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ return self.files[path]["mode"]
+
+ def chown(self, uid, gid, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ if uid != -1:
+ self.files[path]["uid"] = uid
+ if gid != -1:
+ self.files[path]["gid"] = gid
+
+ def chmod(self, mode, path):
+ if path not in self.files:
+ raise RuntimeError("No such file: " + path)
+
+ self.files[path]["mode"] = mode
+
+ def aug_init(self, root, flags):
+ self.auginit = True
+
+ def aug_close(self):
+ self.auginit = False
+
+ def aug_get(self, cfgpath):
+ if not self.auginit:
+ raise RuntimeError("Augeus not initialized")
+
+ if cfgpath == "/files/etc/passwd/root/uid":
+ return 0
+ elif cfgpath == "/files/etc/passwd/fred/uid":
+ return 105
+ elif cfgpath == "/files/etc/passwd/joe/uid":
+ return 110
+ elif cfgpath == "/files/etc/group/root/gid":
+ return 0
+ elif cfgpath == "/files/etc/group/users/gid":
+ return 500
+ elif cfgpath == "/files/etc/group/admins/gid":
+ return 600
+ raise RuntimeError("Unknown path %s", cfgpath)
+
+ def set_trace(self, enabled):
+ self.trace_enabled = enabled
+
+ def set_verbose(self, enabled):
+ self.verbose_enabled = enabled
+
+ def set_event_callback(self, func, events):
+ self.event_callback = (func, events)
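
fakeguestfs stands in for the real libguestfs Python binding, a C extension that is rarely present in a unit-test environment; the guestfs tests install the fake by assigning it into sys.modules (and onto the module under test) before anything imports the real thing. The technique in isolation, with a hypothetical module name:

    import sys
    import types

    # Build a throw-away stand-in for a library that may not be installed.
    fake = types.ModuleType('somelib')
    fake.answer = lambda: 42

    # Every subsequent "import somelib" now resolves to the fake.
    sys.modules['somelib'] = fake

    import somelib
    assert somelib.answer() == 42
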
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
new file mode 100644
index 0000000000..33dd100329
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -0,0 +1,264 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk.vfs import guestfs as vfsimpl
+
+
+class VirtDiskVFSGuestFSTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskVFSGuestFSTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsimpl.guestfs = fakeguestfs
+
+ def _do_test_appliance_setup_inspect(self, forcetcg):
+ if forcetcg:
+ vfsimpl.force_tcg()
+ else:
+ vfsimpl.force_tcg(False)
+
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ vfs.setup()
+
+ if forcetcg:
+ self.assertEqual("force_tcg", vfs.handle.backend_settings)
+ vfsimpl.force_tcg(False)
+ else:
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(3, len(vfs.handle.mounts))
+ self.assertEqual("/dev/mapper/guestvgf-lv_root",
+ vfs.handle.mounts[0][1])
+ self.assertEqual("/dev/vda1",
+ vfs.handle.mounts[1][1])
+ self.assertEqual("/dev/mapper/guestvgf-lv_home",
+ vfs.handle.mounts[2][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+ self.assertEqual("/boot", vfs.handle.mounts[1][2])
+ self.assertEqual("/home", vfs.handle.mounts[2][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_inspect_auto(self):
+ self._do_test_appliance_setup_inspect(False)
+
+ def test_appliance_setup_inspect_tcg(self):
+ self._do_test_appliance_setup_inspect(True)
+
+ def test_appliance_setup_inspect_no_root_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return []
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_inspect_multi_boots_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return ['fake1', 'fake2']
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_static_nopart(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=None)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_static_part(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=2)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_makepath(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertIn("/some/dir", vfs.handle.files)
+ self.assertIn("/other/dir", vfs.handle.files)
+ self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
+ self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
+
+ vfs.teardown()
+
+ def test_append_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Hello World Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_replace_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_read_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertEqual("Hello World", vfs.read_file("/some/file"))
+
+ vfs.teardown()
+
+ def test_has_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ vfs.teardown()
+
+ def test_set_permissions(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.set_permissions("/some/file", 0o7777)
+ self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.teardown()
+
+ def test_set_ownership(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(100, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(500, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(110, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(600, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.teardown()
+
+ def test_close_on_error(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['close_on_exit'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('close_on_exit', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_python_return_dict(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['python_return_dict'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('python_return_dict', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_setup_debug_disable(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.trace_enabled)
+ self.assertFalse(vfs.handle.verbose_enabled)
+ self.assertIsNone(vfs.handle.event_callback)
+
+ def test_setup_debug_enabled(self):
+ self.flags(debug=True, group='guestfs')
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertTrue(vfs.handle.trace_enabled)
+ self.assertTrue(vfs.handle.verbose_enabled)
+ self.assertIsNotNone(vfs.handle.event_callback)
diff --git a/nova/tests/unit/virt/disk/vfs/test_localfs.py b/nova/tests/unit/virt/disk/vfs/test_localfs.py
new file mode 100644
index 0000000000..6e7780e74b
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_localfs.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit import utils as tests_utils
+import nova.utils
+from nova.virt.disk.vfs import localfs as vfsimpl
+
+CONF = cfg.CONF
+
+dirs = []
+files = {}
+commands = []
+
+
+def fake_execute(*args, **kwargs):
+ commands.append({"args": args, "kwargs": kwargs})
+
+ if args[0] == "readlink":
+ if args[1] == "-nm":
+ if args[2] in ["/scratch/dir/some/file",
+ "/scratch/dir/some/dir",
+ "/scratch/dir/other/dir",
+ "/scratch/dir/other/file"]:
+ return args[2], ""
+ elif args[1] == "-e":
+ if args[2] in files:
+ return args[2], ""
+
+ return "", "No such file"
+ elif args[0] == "mkdir":
+ dirs.append(args[2])
+ elif args[0] == "chown":
+ owner = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ sep = owner.find(':')
+ if sep != -1:
+ user = owner[0:sep]
+ group = owner[sep + 1:]
+ else:
+ user = owner
+ group = None
+
+ if user:
+ if user == "fred":
+ uid = 105
+ else:
+ uid = 110
+ files[path]["uid"] = uid
+ if group:
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chgrp":
+ group = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chmod":
+ mode = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ files[path]["mode"] = int(mode, 8)
+ elif args[0] == "cat":
+ path = args[1]
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+ return files[path]["content"], ""
+ elif args[0] == "tee":
+ if args[1] == "-a":
+ path = args[2]
+ append = True
+ else:
+ path = args[1]
+ append = False
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700,
+ }
+ if append:
+ files[path]["content"] += kwargs["process_input"]
+ else:
+ files[path]["content"] = kwargs["process_input"]
+
+
+class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase):
+ def setUp(self):
+ super(VirtDiskVFSLocalFSTestPaths, self).setUp()
+
+ real_execute = processutils.execute
+
+ def nonroot_execute(*cmd_parts, **kwargs):
+ kwargs.pop('run_as_root', None)
+ return real_execute(*cmd_parts, **kwargs)
+
+ self.stubs.Set(processutils, 'execute', nonroot_execute)
+
+ def test_check_safe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ ret = vfs._canonical_path('etc/something.conf')
+ self.assertEqual(ret, '/foo/etc/something.conf')
+
+ def test_check_unsafe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ self.assertRaises(exception.Invalid,
+ vfs._canonical_path,
+ 'etc/../../../something.conf')
+
+
+class VirtDiskVFSLocalFSTest(test.NoDBTestCase):
+ def test_makepath(self):
+ global dirs, commands
+ dirs = []
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertEqual(dirs,
+ ["/scratch/dir/some/dir", "/scratch/dir/other/dir"]),
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_append_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Hello World Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '-a',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': ' Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_replace_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': 'Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_read_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ self.assertEqual(vfs.read_file("/some/file"), "Hello World")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_has_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ ])
+
+ def test_set_permissions(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ vfs.set_permissions("/some/file", 0o777)
+ self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chmod', '777',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_set_ownership(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500)
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'fred',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chgrp', 'users',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'joe:admins',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
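
Every expected-command assertion in the localfs tests records the same kwargs, so it may help to spell out the call pattern they imply. The sketch below shows how a VFSLocalFS-style helper would be expected to shell out through processutils.execute; it is an inference from the recorded commands, not a copy of nova/virt/disk/vfs/localfs.py, and _read_canonical is a hypothetical name.

    import nova.utils
    from oslo.concurrency import processutils

    def _read_canonical(canonical_path):
        # Matches the kwargs recorded by fake_execute above:
        # {'run_as_root': True, 'root_helper': nova.utils._get_root_helper()}
        out, _err = processutils.execute(
            'cat', canonical_path,
            run_as_root=True,
            root_helper=nova.utils._get_root_helper())
        return out
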
diff --git a/nova/tests/unit/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/__init__.py
diff --git a/nova/tests/unit/virt/hyperv/db_fakes.py b/nova/tests/unit/virt/hyperv/db_fakes.py
new file mode 100644
index 0000000000..9e8249323e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/db_fakes.py
@@ -0,0 +1,167 @@
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the Hyper-V test suite.
+"""
+
+import uuid
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import utils
+
+
+def get_fake_instance_data(name, project_id, user_id):
+ return {'name': name,
+ 'id': 1,
+ 'uuid': str(uuid.uuid4()),
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'flavor':
+ {'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 1024,
+ 'flavorid': 1,
+ 'rxtx_factor': 1}
+ }
+
+
+def get_fake_image_data(project_id, user_id):
+ return {'name': 'image1',
+ 'id': 1,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'flavor': 'm1.tiny',
+ }
+
+
+def get_fake_volume_info_data(target_portal, volume_id):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': 1,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
+ 'target_portal': target_portal,
+ 'target_lun': 1,
+ 'auth_method': 'CHAP',
+ }
+ }
+
+
+def get_fake_block_device_info(target_portal, volume_id):
+ return {'block_device_mapping': [{'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'data': {'target_lun': 1,
+ 'volume_id': volume_id,
+ 'target_iqn':
+ 'iqn.2010-10.org.openstack:volume-' +
+ volume_id,
+ 'target_portal': target_portal,
+ 'target_discovered': False}},
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}],
+ 'root_device_name': 'fake_root_device_name',
+ 'ephemerals': [],
+ 'swap': None
+ }
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ FLAVORS = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def get(self, key, default=None):
+ if key in self.values:
+ return self.values[key]
+ else:
+ return default
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ def __setitem__(self, key, value):
+ self.values[key] = value
+
+ def __str__(self):
+ return str(self.values)
+
+ def fake_instance_create(context, values):
+ """Stubs out the db.instance_create method."""
+
+ if 'flavor' not in values:
+ return
+
+ flavor = values['flavor']
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'uuid': str(uuid.uuid4()),
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_ref': values['image_ref'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'flavor': flavor,
+ 'memory_mb': flavor['memory_mb'],
+ 'vcpus': flavor['vcpus'],
+ 'mac_addresses': [{'address': values['mac_address']}],
+ 'root_gb': flavor['root_gb'],
+ 'system_metadata': {'image_shutdown_timeout': 0},
+ }
+ return FakeModel(base_options)
+
+ def fake_flavor_get_all(context, inactive=0, filters=None):
+ return FLAVORS.values()
+
+ def fake_flavor_get_by_name(context, name):
+ return FLAVORS[name]
+
+ def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
+ return {}
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
+ stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
+ stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
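
db_fakes is consumed by the Hyper-V driver tests further down (see _setup_stubs in test_hypervapi.py); a trimmed usage sketch, with the test-case class name being hypothetical:

    from nova import test
    from nova.tests.unit.virt.hyperv import db_fakes

    class SomeHyperVTestCase(test.NoDBTestCase):   # hypothetical name
        def setUp(self):
            super(SomeHyperVTestCase, self).setUp()
            # Replace db.instance_create, flavor lookups, etc. with the fakes.
            db_fakes.stub_out_db_instance_api(self.stubs)
            self.instance_data = db_fakes.get_fake_instance_data(
                'test-vm', 'fake-project', 'fake-user')
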
diff --git a/nova/tests/unit/virt/hyperv/fake.py b/nova/tests/unit/virt/hyperv/fake.py
new file mode 100644
index 0000000000..6403374aa5
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/fake.py
@@ -0,0 +1,90 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import os
+
+
+class PathUtils(object):
+ def open(self, path, mode):
+ return io.BytesIO(b'fake content')
+
+ def exists(self, path):
+ return False
+
+ def makedirs(self, path):
+ pass
+
+ def remove(self, path):
+ pass
+
+ def rename(self, src, dest):
+ pass
+
+ def copyfile(self, src, dest):
+ pass
+
+ def copy(self, src, dest):
+ pass
+
+ def rmtree(self, path):
+ pass
+
+ def get_instances_dir(self, remote_server=None):
+ return 'C:\\FakeInstancesPath\\'
+
+ def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
+ remove_dir=False):
+ return os.path.join(self.get_instances_dir(), instance_name, '_revert')
+
+ def get_instance_dir(self, instance_name, remote_server=None,
+ create_dir=True, remove_dir=False):
+ return os.path.join(self.get_instances_dir(remote_server),
+ instance_name)
+
+ def lookup_root_vhd_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.vhd')
+
+ def lookup_configdrive_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'configdrive.iso')
+
+ def lookup_ephemeral_vhd_path(self, instance_name):
+ instance_path = self.get_instance_dir(instance_name)
+ if instance_path:
+ return os.path.join(instance_path, 'ephemeral.vhd')
+
+ def get_root_vhd_path(self, instance_name, format_ext):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.' + format_ext)
+
+ def get_ephemeral_vhd_path(self, instance_name, format_ext):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
+
+ def get_base_vhd_dir(self):
+ return os.path.join(self.get_instances_dir(), '_base')
+
+ def get_export_dir(self, instance_name):
+ export_dir = os.path.join(self.get_instances_dir(), 'export',
+ instance_name)
+ return export_dir
+
+ def vhd_exists(self, path):
+ return False
+
+ def get_vm_console_log_paths(self, vm_name, remote_server=None):
+ return 'fake_vm_log_path'
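
The fake PathUtils above is swapped in wholesale for the real class so that path lookups return the canned C:\FakeInstancesPath values without touching the filesystem; _setup_stubs in test_hypervapi.py below does exactly this via self.stubs.Set. A minimal reminder of the pattern, with _use_fake_pathutils being a hypothetical helper name:

    from nova.tests.unit.virt.hyperv import fake
    from nova.virt.hyperv import pathutils

    def _use_fake_pathutils(stubs):
        # Any code instantiating pathutils.PathUtils now gets fake.PathUtils.
        stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
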
diff --git a/nova/tests/unit/virt/hyperv/test_basevolumeutils.py b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
new file mode 100644
index 0000000000..8f48515d09
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
@@ -0,0 +1,157 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import basevolumeutils
+
+
+def _exception_thrower():
+ raise Exception("Testing exception handling.")
+
+
+class BaseVolumeUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V BaseVolumeUtils class."""
+
+ _FAKE_COMPUTER_NAME = "fake_computer_name"
+ _FAKE_DOMAIN_NAME = "fake_domain_name"
+ _FAKE_INITIATOR_NAME = "fake_initiator_name"
+ _FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name"
+ _FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"'
+ _FAKE_MOUNT_DEVICE = '/dev/fake/mount'
+ _FAKE_DEVICE_NAME = '/dev/fake/path'
+ _FAKE_SWAP = {'device_name': _FAKE_DISK_PATH}
+
+ def setUp(self):
+ self._volutils = basevolumeutils.BaseVolumeUtils()
+ self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
+
+ super(BaseVolumeUtilsTestCase, self).setUp()
+
+ def test_get_iscsi_initiator_ok(self):
+ self._check_get_iscsi_initiator(
+ mock.MagicMock(return_value=mock.sentinel.FAKE_KEY),
+ self._FAKE_INITIATOR_NAME)
+
+ def test_get_iscsi_initiator_exception(self):
+ initiator_name = "%(iqn)s.%(domain)s" % {
+ 'iqn': self._FAKE_INITIATOR_IQN_NAME,
+ 'domain': self._FAKE_DOMAIN_NAME
+ }
+
+ self._check_get_iscsi_initiator(_exception_thrower, initiator_name)
+
+ def _check_get_iscsi_initiator(self, winreg_method, expected):
+ mock_computer = mock.MagicMock()
+ mock_computer.name = self._FAKE_COMPUTER_NAME
+ mock_computer.Domain = self._FAKE_DOMAIN_NAME
+ self._volutils._conn_cimv2.Win32_ComputerSystem.return_value = [
+ mock_computer]
+
+ with mock.patch.object(basevolumeutils,
+ '_winreg', create=True) as mock_winreg:
+ mock_winreg.OpenKey = winreg_method
+ mock_winreg.QueryValueEx = mock.MagicMock(return_value=[expected])
+
+ initiator_name = self._volutils.get_iscsi_initiator()
+ self.assertEqual(expected, initiator_name)
+
+ @mock.patch.object(basevolumeutils, 'driver')
+ def test_volume_in_mapping(self, mock_driver):
+ mock_driver.block_device_info_get_mapping.return_value = [
+ {'mount_device': self._FAKE_MOUNT_DEVICE}]
+ mock_driver.block_device_info_get_swap = mock.MagicMock(
+ return_value=self._FAKE_SWAP)
+ mock_driver.block_device_info_get_ephemerals = mock.MagicMock(
+ return_value=[{'device_name': self._FAKE_DEVICE_NAME}])
+
+ mock_driver.swap_is_usable = mock.MagicMock(return_value=True)
+
+ self.assertTrue(self._volutils.volume_in_mapping(
+ self._FAKE_MOUNT_DEVICE, mock.sentinel.FAKE_BLOCK_DEVICE_INFO))
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ "_get_drive_number_from_disk_path")
+ def test_get_session_id_from_mounted_disk(self, mock_get_session_id):
+ mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
+ mock_initiator_session = self._create_initiator_session()
+ self._volutils._conn_wmi.query.return_value = [mock_initiator_session]
+ session_id = self._volutils.get_session_id_from_mounted_disk(
+ self._FAKE_DISK_PATH)
+
+ self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id)
+
+ def test_get_devices_for_target(self):
+ init_session = self._create_initiator_session()
+ self._volutils._conn_wmi.query.return_value = [init_session]
+ devices = self._volutils._get_devices_for_target(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(init_session.Devices, devices)
+
+ def test_get_devices_for_target_not_found(self):
+ self._volutils._conn_wmi.query.return_value = None
+ devices = self._volutils._get_devices_for_target(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(0, len(devices))
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ '_get_devices_for_target')
+ def test_get_device_number_for_target(self, fake_get_devices):
+ init_session = self._create_initiator_session()
+ fake_get_devices.return_value = init_session.Devices
+ device_number = self._volutils.get_device_number_for_target(
+ mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
+
+ self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number)
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ '_get_devices_for_target')
+ def test_get_target_lun_count(self, fake_get_devices):
+ init_session = self._create_initiator_session()
+ fake_get_devices.return_value = [init_session]
+ lun_count = self._volutils.get_target_lun_count(
+ mock.sentinel.FAKE_IQN)
+
+ self.assertEqual(len(init_session.Devices), lun_count)
+
+ @mock.patch.object(basevolumeutils.BaseVolumeUtils,
+ "_get_drive_number_from_disk_path")
+ def test_get_target_from_disk_path(self, mock_get_session_id):
+ mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
+ init_sess = self._create_initiator_session()
+ mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
+ mock_ses_class.return_value = [init_sess]
+
+ (target_name, scsi_lun) = self._volutils.get_target_from_disk_path(
+ self._FAKE_DISK_PATH)
+
+ self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name)
+ self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun)
+
+ def _create_initiator_session(self):
+ device = mock.MagicMock()
+ device.ScsiLun = mock.sentinel.FAKE_LUN
+ device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER
+ device.TargetName = mock.sentinel.FAKE_TARGET_NAME
+ init_session = mock.MagicMock()
+ init_session.Devices = [device]
+ init_session.SessionId = mock.sentinel.FAKE_SESSION_ID
+
+ return init_session
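
The basevolumeutils tests rely on mock.sentinel objects as opaque stand-ins for session IDs, LUNs and device numbers. Because each sentinel attribute is a unique singleton, identity survives any number of pass-throughs, which is what makes the assertEqual checks above meaningful. A self-contained illustration (the names are placeholders, not part of the module under test):

    import mock

    session = mock.MagicMock()
    session.SessionId = mock.sentinel.FAKE_SESSION_ID
    # The same sentinel compares equal (by identity) wherever it reappears.
    assert session.SessionId == mock.sentinel.FAKE_SESSION_ID
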
diff --git a/nova/tests/unit/virt/hyperv/test_hostutils.py b/nova/tests/unit/virt/hyperv/test_hostutils.py
new file mode 100644
index 0000000000..998692d350
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_hostutils.py
@@ -0,0 +1,97 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import hostutils
+
+
+class FakeCPUSpec(object):
+ """Fake CPU Spec for unit tests."""
+
+ Architecture = mock.sentinel.cpu_arch
+ Name = mock.sentinel.cpu_name
+ Manufacturer = mock.sentinel.cpu_man
+ NumberOfCores = mock.sentinel.cpu_cores
+ NumberOfLogicalProcessors = mock.sentinel.cpu_procs
+
+
+class HostUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V hostutils class."""
+
+ _FAKE_MEMORY_TOTAL = 1024L
+ _FAKE_MEMORY_FREE = 512L
+ _FAKE_DISK_SIZE = 1024L
+ _FAKE_DISK_FREE = 512L
+ _FAKE_VERSION_GOOD = '6.2.0'
+ _FAKE_VERSION_BAD = '6.1.9'
+
+ def setUp(self):
+ self._hostutils = hostutils.HostUtils()
+ self._hostutils._conn_cimv2 = mock.MagicMock()
+
+ super(HostUtilsTestCase, self).setUp()
+
+ @mock.patch('nova.virt.hyperv.hostutils.ctypes')
+ def test_get_host_tick_count64(self, mock_ctypes):
+ tick_count64 = "100"
+ mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64
+ response = self._hostutils.get_host_tick_count64()
+ self.assertEqual(tick_count64, response)
+
+ def test_get_cpus_info(self):
+ cpu = mock.MagicMock(spec=FakeCPUSpec)
+ self._hostutils._conn_cimv2.query.return_value = [cpu]
+ cpu_list = self._hostutils.get_cpus_info()
+ self.assertEqual([cpu._mock_children], cpu_list)
+
+ def test_get_memory_info(self):
+ memory = mock.MagicMock()
+ type(memory).TotalVisibleMemorySize = mock.PropertyMock(
+ return_value=self._FAKE_MEMORY_TOTAL)
+ type(memory).FreePhysicalMemory = mock.PropertyMock(
+ return_value=self._FAKE_MEMORY_FREE)
+
+ self._hostutils._conn_cimv2.query.return_value = [memory]
+ total_memory, free_memory = self._hostutils.get_memory_info()
+
+ self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
+ self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)
+
+ def test_get_volume_info(self):
+ disk = mock.MagicMock()
+ type(disk).Size = mock.PropertyMock(return_value=self._FAKE_DISK_SIZE)
+ type(disk).FreeSpace = mock.PropertyMock(
+ return_value=self._FAKE_DISK_FREE)
+
+ self._hostutils._conn_cimv2.query.return_value = [disk]
+ (total_memory, free_memory) = self._hostutils.get_volume_info(
+ mock.sentinel.FAKE_DRIVE)
+
+ self.assertEqual(self._FAKE_DISK_SIZE, total_memory)
+ self.assertEqual(self._FAKE_DISK_FREE, free_memory)
+
+ def test_check_min_windows_version_true(self):
+ self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)
+
+ def test_check_min_windows_version_false(self):
+ self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)
+
+ def _test_check_min_windows_version(self, version, expected):
+ os = mock.MagicMock()
+ os.Version = version
+ self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
+ self.assertEqual(expected,
+ self._hostutils.check_min_windows_version(6, 2))
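
test_get_memory_info and test_get_volume_info above attach mock.PropertyMock to type(obj) so that plain attribute access on the fake WMI object returns the canned value. A minimal, stand-alone reminder of that idiom:

    import mock

    disk = mock.MagicMock()
    type(disk).Size = mock.PropertyMock(return_value=1024)
    assert disk.Size == 1024   # attribute access, no call needed
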
diff --git a/nova/tests/unit/virt/hyperv/test_hypervapi.py b/nova/tests/unit/virt/hyperv/test_hypervapi.py
new file mode 100644
index 0000000000..375420a484
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_hypervapi.py
@@ -0,0 +1,1967 @@
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for the Hyper-V driver and related APIs.
+"""
+
+import contextlib
+import datetime
+import io
+import os
+import platform
+import shutil
+import time
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.openstack.common import fileutils
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.virt.hyperv import db_fakes
+from nova.tests.unit.virt.hyperv import fake
+from nova import utils
+from nova.virt import configdrive
+from nova.virt import driver
+from nova.virt.hyperv import basevolumeutils
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import driver as driver_hyperv
+from nova.virt.hyperv import hostops
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import livemigrationutils
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import networkutilsv2
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import rdpconsoleutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vhdutilsv2
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+from nova.virt.hyperv import volumeops
+from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsv2
+from nova.virt import images
+
+CONF = cfg.CONF
+CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
+
+
+class HyperVAPIBaseTestCase(test.NoDBTestCase):
+ """Base unit tests class for Hyper-V driver calls."""
+
+ def __init__(self, test_case_name):
+ self._mox = mox.Mox()
+ super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(HyperVAPIBaseTestCase, self).setUp()
+
+ self._user_id = 'fake'
+ self._project_id = 'fake'
+ self._instance_data = None
+ self._image_metadata = None
+ self._fetched_image = None
+ self._update_image_raise_exception = False
+ self._volume_target_portal = 'testtargetportal:3260'
+ self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
+ self._context = context.RequestContext(self._user_id, self._project_id)
+ self._instance_ide_disks = []
+ self._instance_ide_dvds = []
+ self._instance_volume_disks = []
+ self._test_vm_name = None
+ self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
+ self._check_min_windows_version_satisfied = True
+
+ self._setup_stubs()
+
+ self.flags(instances_path=r'C:\Hyper-V\test\instances',
+ network_api_class='nova.network.neutronv2.api.API')
+ self.flags(force_volumeutils_v1=True, group='hyperv')
+ self.flags(force_hyperv_utils_v1=True, group='hyperv')
+
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def _setup_stubs(self):
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ fake_image.stub_out_image_service(self.stubs)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def fake_fetch(context, image_id, target, user, project):
+ self._fetched_image = target
+ self.stubs.Set(images, 'fetch', fake_fetch)
+
+ def fake_get_remote_image_service(context, name):
+ class FakeGlanceImageService(object):
+ def update(self_fake, context, image_id, image_metadata, f):
+ if self._update_image_raise_exception:
+ raise vmutils.HyperVException(
+ "Simulated update failure")
+ self._image_metadata = image_metadata
+ return (FakeGlanceImageService(), 1)
+ self.stubs.Set(glance, 'get_remote_image_service',
+ fake_get_remote_image_service)
+
+ def fake_check_min_windows_version(fake_self, major, minor):
+ if [major, minor] >= [6, 3]:
+ return False
+ return self._check_min_windows_version_satisfied
+ self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
+ fake_check_min_windows_version)
+
+ def fake_sleep(ms):
+ pass
+ self.stubs.Set(time, 'sleep', fake_sleep)
+
+ class FakeIOThread(object):
+ def __init__(self, src, dest, max_bytes):
+ pass
+
+ def start(self):
+ pass
+
+ self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
+ self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
+ self._mox.StubOutWithMock(fake.PathUtils, 'open')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copy')
+ self._mox.StubOutWithMock(fake.PathUtils, 'remove')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rename')
+ self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
+ self._mox.StubOutWithMock(fake.PathUtils,
+ 'get_instance_migr_revert_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
+
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'attach_volume_to_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_mounted_disk_by_drive_number')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_controller_volume_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'enable_vm_metrics_collection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_vm_serial_port_connection')
+
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils,
+ 'get_internal_vhd_size_by_file_size')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
+
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils,
+ 'is_cpu_feature_present')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
+
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'get_external_vswitch')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'create_vswitch_port')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'vswitch_port_needed')
+
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'live_migrate_vm')
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'check_live_migration_config')
+
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'volume_in_mapping')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_session_id_from_mounted_disk')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_device_number_for_target')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_from_disk_path')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_lun_count')
+
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'execute_log_out')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'get_iscsi_initiator')
+
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'execute_log_out')
+
+ self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
+ 'get_rdp_console_port')
+
+ self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
+ self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ 'metadata_for_config_drive')
+
+ # Can't use StubOutClassWithMocks due to __exit__ and __enter__
+ self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+
+ self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
+ self._mox.StubOutWithMock(utils, 'execute')
+
+ def tearDown(self):
+ self._mox.UnsetStubs()
+ super(HyperVAPIBaseTestCase, self).tearDown()
+
+
+class HyperVAPITestCase(HyperVAPIBaseTestCase):
+ """Unit tests for Hyper-V driver calls."""
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self._conn)
+
+ def test_get_available_resource(self):
+ cpu_info = {'Architecture': 'fake',
+ 'Name': 'fake',
+ 'Manufacturer': 'ACME, Inc.',
+ 'NumberOfCores': 2,
+ 'NumberOfLogicalProcessors': 4}
+
+ tot_mem_kb = 2000000L
+ free_mem_kb = 1000000L
+
+ tot_hdd_b = 4L * 1024 ** 3
+ free_hdd_b = 3L * 1024 ** 3
+
+ windows_version = '6.2.9200'
+
+ hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
+ free_mem_kb))
+
+ m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
+ m.AndReturn((tot_hdd_b, free_hdd_b))
+
+ hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
+ m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
+ m.MultipleTimes()
+
+ m = hostutils.HostUtils.get_windows_version()
+ m.AndReturn(windows_version)
+
+ self._mox.ReplayAll()
+ dic = self._conn.get_available_resource(None)
+ self._mox.VerifyAll()
+
+ self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
+ self.assertEqual(dic['hypervisor_hostname'], platform.node())
+ self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
+ self.assertEqual(dic['memory_mb_used'],
+ tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
+ self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
+ self.assertEqual(dic['local_gb_used'],
+ tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
+ self.assertEqual(dic['hypervisor_version'],
+ windows_version.replace('.', ''))
+ self.assertEqual(dic['supported_instances'],
+ '[["i686", "hyperv", "hvm"], ["x86_64", "hyperv", "hvm"]]')
+
+ def test_list_instances(self):
+ fake_instances = ['fake1', 'fake2']
+ vmutils.VMUtils.list_instances().AndReturn(fake_instances)
+
+ self._mox.ReplayAll()
+ instances = self._conn.list_instances()
+ self._mox.VerifyAll()
+
+ self.assertEqual(instances, fake_instances)
+
+ def test_get_host_uptime(self):
+ fake_host = "fake_host"
+ with mock.patch.object(self._conn._hostops,
+ "get_host_uptime") as mock_uptime:
+ self._conn._hostops.get_host_uptime(fake_host)
+ mock_uptime.assert_called_once_with(fake_host)
+
+ def test_get_info(self):
+ self._instance_data = self._get_instance_data()
+
+ summary_info = {'NumberOfProcessors': 2,
+ 'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
+ 'MemoryUsage': 1000,
+ 'UpTime': 1}
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.get_vm_summary_info(func)
+ m.AndReturn(summary_info)
+
+ self._mox.ReplayAll()
+ info = self._conn.get_info(self._instance_data)
+ self._mox.VerifyAll()
+
+ self.assertEqual(info["state"], power_state.RUNNING)
+
+ def test_get_info_instance_not_found(self):
+ # Tests that InstanceNotFound is raised if the instance isn't found
+ # by the vmutils.vm_exists method.
+ self._instance_data = self._get_instance_data()
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(False)
+
+ self._mox.ReplayAll()
+ self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
+ self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_spawn_cow_image(self):
+ self._test_spawn_instance(True)
+
+ def test_spawn_cow_image_vhdx(self):
+ self._test_spawn_instance(True, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def test_spawn_no_cow_image(self):
+ self._test_spawn_instance(False)
+
+ def test_spawn_dynamic_memory(self):
+ CONF.set_override('dynamic_memory_ratio', 2.0, 'hyperv')
+ self._test_spawn_instance()
+
+ def test_spawn_no_cow_image_vhdx(self):
+ self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def _setup_spawn_config_drive_mocks(self, use_cdrom):
+ instance_metadata.InstanceMetadata(mox.IgnoreArg(),
+ content=mox.IsA(list),
+ extra_md=mox.IsA(dict))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ cdb = self._mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.IsA(str))
+ cdb.__exit__(None, None, None).AndReturn(None)
+
+ if not use_cdrom:
+ utils.execute(CONF.hyperv.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ mox.IsA(str),
+ mox.IsA(str),
+ attempts=1)
+ fake.PathUtils.remove(mox.IsA(str))
+
+ m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk)
+
+ def _test_spawn_config_drive(self, use_cdrom, format_error=False):
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ if use_cdrom:
+ expected_ide_disks = 1
+ expected_ide_dvds = 1
+ else:
+ expected_ide_disks = 2
+ expected_ide_dvds = 0
+
+ if format_error:
+ self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
+ self._test_spawn_instance,
+ with_exception=True,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+ else:
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+
+ def test_spawn_config_drive(self):
+ self._test_spawn_config_drive(False)
+
+ def test_spawn_config_drive_format_error(self):
+ CONF.set_override('config_drive_format', 'wrong_format')
+ self._test_spawn_config_drive(True, True)
+
+ def test_spawn_config_drive_cdrom(self):
+ self._test_spawn_config_drive(True)
+
+ def test_spawn_no_config_drive(self):
+ self.flags(force_config_drive=False)
+
+ expected_ide_disks = 1
+ expected_ide_dvds = 0
+
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds)
+
+ def _test_spawn_nova_net_vif(self, with_port):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ fake_vswitch_path = 'fake vswitch path'
+ fake_vswitch_port = 'fake port'
+
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndReturn(fake_vswitch_path)
+
+ m = networkutils.NetworkUtils.vswitch_port_needed()
+ m.AndReturn(with_port)
+
+ if with_port:
+ m = networkutils.NetworkUtils.create_vswitch_port(
+ fake_vswitch_path, mox.IsA(str))
+ m.AndReturn(fake_vswitch_port)
+ vswitch_conn_data = fake_vswitch_port
+ else:
+ vswitch_conn_data = fake_vswitch_path
+
+ vmutils.VMUtils.set_nic_connection(mox.IsA(str),
+ mox.IsA(str), vswitch_conn_data)
+
+ self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
+
+ def test_spawn_nova_net_vif_with_port(self):
+ self._test_spawn_nova_net_vif(True)
+
+ def test_spawn_nova_net_vif_without_port(self):
+ self._test_spawn_nova_net_vif(False)
+
+ def test_spawn_nova_net_vif_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
+
+ self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
+ setup_vif_mocks_func=setup_vif_mocks,
+ with_exception=True)
+
+ def test_spawn_with_metrics_collection(self):
+ self.flags(enable_instance_metrics_collection=True, group='hyperv')
+ self._test_spawn_instance(False)
+
+ def test_spawn_with_ephemeral_storage(self):
+ self._test_spawn_instance(True, expected_ide_disks=2,
+ ephemeral_storage=True)
+
+ def _check_instance_name(self, vm_name):
+ return vm_name == self._instance_data['name']
+
+ def _test_vm_state_change(self, action, from_state, to_state):
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ to_state)
+
+ if to_state in (constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_delete_vm_log_mocks()
+ if to_state in (constants.HYPERV_VM_STATE_ENABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ action(self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_pause(self):
+ self._test_vm_state_change(self._conn.pause, None,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_pause_already_paused(self):
+ self._test_vm_state_change(self._conn.pause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_unpause(self):
+ self._test_vm_state_change(self._conn.unpause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_unpause_already_running(self):
+ self._test_vm_state_change(self._conn.unpause, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_suspend(self):
+ self._test_vm_state_change(self._conn.suspend, None,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_suspend_already_suspended(self):
+ self._test_vm_state_change(self._conn.suspend,
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_resume(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None),
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_resume_already_running(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None), None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_power_off(self):
+ self._test_vm_state_change(self._conn.power_off, None,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_already_powered_off(self):
+ self._test_vm_state_change(self._conn.power_off,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def _test_power_on(self, block_device_info):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ if block_device_info:
+ self._mox.StubOutWithMock(volumeops.VolumeOps,
+ 'fix_instance_volume_disk_paths')
+ volumeops.VolumeOps.fix_instance_volume_disk_paths(
+ mox.Func(self._check_instance_name), block_device_info)
+
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info,
+ block_device_info=block_device_info)
+ self._mox.VerifyAll()
+
+ def test_power_on_having_block_devices(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+ self._test_power_on(block_device_info=block_device_info)
+
+ def test_power_on_without_block_devices(self):
+ self._test_power_on(block_device_info=None)
+
+ def test_power_on_already_running(self):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info)
+ self._mox.VerifyAll()
+
+ def test_reboot(self):
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ self._setup_delete_vm_log_mocks()
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.reboot(self._context, self._instance_data, network_info,
+ None)
+ self._mox.VerifyAll()
+
+ def _setup_destroy_mocks(self, destroy_disks=True):
+ fake_volume_drives = ['fake_volume_drive']
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun = 'fake_target_lun'
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
+
+ self._setup_delete_vm_log_mocks()
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([], fake_volume_drives))
+
+ vmutils.VMUtils.destroy_vm(func)
+
+ m = self._conn._volumeops.get_target_from_disk_path(
+ fake_volume_drives[0])
+ m.AndReturn((fake_target_iqn, fake_target_lun))
+
+ self._mock_logout_storage_target(fake_target_iqn)
+
+ if destroy_disks:
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ def test_destroy(self):
+ self._instance_data = self._get_instance_data()
+
+ self._setup_destroy_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.destroy(self._context, self._instance_data, None)
+ self._mox.VerifyAll()
+
+ def test_live_migration_unsupported_os(self):
+ self._check_min_windows_version_satisfied = False
+ self._conn = driver_hyperv.HyperVDriver(None)
+ self._test_live_migration(unsupported_os=True)
+
+ def test_live_migration_without_volumes(self):
+ self._test_live_migration()
+
+ def test_live_migration_with_volumes(self):
+ self._test_live_migration(with_volumes=True)
+
+ def test_live_migration_with_multiple_luns_per_target(self):
+ self._test_live_migration(with_volumes=True,
+ other_luns_available=True)
+
+ def test_live_migration_with_target_failure(self):
+ self._test_live_migration(test_failure=True)
+
+ def _test_live_migration(self, test_failure=False,
+ with_volumes=False,
+ other_luns_available=False,
+ unsupported_os=False):
+ dest_server = 'fake_server'
+
+ instance_data = self._get_instance_data()
+
+ fake_post_method = self._mox.CreateMockAnything()
+ if not test_failure and not unsupported_os:
+ fake_post_method(self._context, instance_data, dest_server,
+ False)
+
+ fake_recover_method = self._mox.CreateMockAnything()
+ if test_failure:
+ fake_recover_method(self._context, instance_data, dest_server,
+ False)
+
+ if with_volumes:
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun_count = 1
+
+ if not unsupported_os:
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_local_vm_log_path', 'fake_vm_log_path.1'))
+
+ m = fake.PathUtils.get_vm_console_log_paths(
+ mox.IsA(str), remote_server=mox.IsA(str))
+ m.AndReturn(('fake_remote_vm_log_path',
+ 'fake_remote_vm_log_path.1'))
+
+ self._mox.StubOutWithMock(fake.PathUtils, 'exists')
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(False)
+
+ fake.PathUtils.copy(mox.IsA(str), mox.IsA(str))
+
+ m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
+ instance_data['name'], dest_server)
+ if test_failure:
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ if with_volumes:
+ m.AndReturn({fake_target_iqn: fake_target_lun_count})
+
+ self._mock_logout_storage_target(fake_target_iqn,
+ other_luns_available)
+ else:
+ m.AndReturn({})
+
+ self._mox.ReplayAll()
+ try:
+ hyperv_exception_raised = False
+ unsupported_os_exception_raised = False
+ self._conn.live_migration(self._context, instance_data,
+ dest_server, fake_post_method,
+ fake_recover_method)
+ except vmutils.HyperVException:
+ hyperv_exception_raised = True
+ except NotImplementedError:
+ unsupported_os_exception_raised = True
+
+ self.assertTrue(not test_failure ^ hyperv_exception_raised)
+ self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
+ self._mox.VerifyAll()
+
+ def test_pre_live_migration_cow_image(self):
+ self._test_pre_live_migration(True, False)
+
+ def test_pre_live_migration_no_cow_image(self):
+ self._test_pre_live_migration(False, False)
+
+ def test_pre_live_migration_with_volumes(self):
+ self._test_pre_live_migration(False, True)
+
+ def _test_pre_live_migration(self, cow, with_volumes):
+ self.flags(use_cow_images=cow)
+
+ instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, instance_data)
+ instance['system_metadata'] = {}
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
+ m.AndReturn(True)
+
+ if cow:
+ self._setup_get_cached_image_mocks(cow)
+
+ if with_volumes:
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+ else:
+ block_device_info = None
+
+ self._mox.ReplayAll()
+ self._conn.pre_live_migration(self._context, instance,
+ block_device_info, None, network_info)
+ self._mox.VerifyAll()
+
+ if cow:
+ self.assertIsNotNone(self._fetched_image)
+ else:
+ self.assertIsNone(self._fetched_image)
+
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self._conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
+ def test_snapshot_with_update_failure(self):
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
+
+ self._update_image_raise_exception = True
+
+ self._mox.ReplayAll()
+ self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
+ self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+ self._mox.VerifyAll()
+
+ # Assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ def _setup_snapshot_mocks(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
+ ]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+
+ fake_hv_snapshot_path = 'fake_snapshot_path'
+ fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
+
+ self._instance_data = self._get_instance_data()
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.take_vm_snapshot(func)
+ m.AndReturn(fake_hv_snapshot_path)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
+ m.AndReturn(fake_parent_vhd_path)
+
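+ # Capture the destination paths passed to copyfile so the
+ # reconnect/merge calls below can be matched against them.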
+ self._fake_dest_disk_path = None
+
+ def copy_dest_disk_path(src, dest):
+ self._fake_dest_disk_path = dest
+
+ m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+ m.WithSideEffects(copy_dest_disk_path)
+
+ self._fake_dest_base_disk_path = None
+
+ def copy_dest_base_disk_path(src, dest):
+ self._fake_dest_base_disk_path = dest
+
+ m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
+ m.WithSideEffects(copy_dest_base_disk_path)
+
+ def check_dest_disk_path(path):
+ return path == self._fake_dest_disk_path
+
+ def check_dest_base_disk_path(path):
+ return path == self._fake_dest_base_disk_path
+
+ func1 = mox.Func(check_dest_disk_path)
+ func2 = mox.Func(check_dest_base_disk_path)
+ # Make sure that the hyper-v base and differential VHDs are merged
+ vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
+ vhdutils.VHDUtils.merge_vhd(func1, func2)
+
+ def check_snapshot_path(snapshot_path):
+ return snapshot_path == fake_hv_snapshot_path
+
+ # Make sure that the Hyper-V snapshot is removed
+ func = mox.Func(check_snapshot_path)
+ vmutils.VMUtils.remove_vm_snapshot(func)
+
+ fake.PathUtils.rmtree(mox.IsA(str))
+
+ m = fake.PathUtils.open(func2, 'rb')
+ m.AndReturn(io.BytesIO(b'fake content'))
+
+ return (snapshot_name, func_call_matcher)
+
+ def test_snapshot(self):
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.snapshot(self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+ self._mox.VerifyAll()
+
+ self.assertTrue(self._image_metadata)
+ self.assertIn("disk_format", self._image_metadata)
+ self.assertEqual("vhd", self._image_metadata["disk_format"])
+
+ # Assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ def _get_instance_data(self):
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+ return db_fakes.get_fake_instance_data(instance_name,
+ self._project_id,
+ self._user_id)
+
+ def _spawn_instance(self, cow, block_device_info=None,
+ ephemeral_storage=False):
+ self.flags(use_cow_images=cow)
+
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ instance['system_metadata'] = {}
+
+ if ephemeral_storage:
+ instance['ephemeral_gb'] = 1
+
+ image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ self._conn.spawn(self._context, instance, image,
+ injected_files=[], admin_password=None,
+ network_info=network_info,
+ block_device_info=block_device_info)
+
+ def _add_ide_disk(self, vm_name, path, ctrller_addr,
+ drive_addr, drive_type):
+ if drive_type == constants.IDE_DISK:
+ self._instance_ide_disks.append(path)
+ elif drive_type == constants.IDE_DVD:
+ self._instance_ide_dvds.append(path)
+
+ def _add_volume_disk(self, vm_name, controller_path, address,
+ mounted_disk_path):
+ self._instance_volume_disks.append(mounted_disk_path)
+
+ def _check_img_path(self, image_path):
+ return image_path == self._fetched_image
+
+ def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
+ boot_from_volume=False,
+ block_device_info=None,
+ admin_permissions=True,
+ ephemeral_storage=False):
+ vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
+ mox.IsA(int), mox.IsA(bool),
+ CONF.hyperv.dynamic_memory_ratio,
+ mox.IsA(list))
+
+ if not boot_from_volume:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ if ephemeral_storage:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ func = mox.Func(self._check_vm_name)
+ m = vmutils.VMUtils.create_scsi_controller(func)
+ m.InAnyOrder()
+
+ if boot_from_volume:
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
+ target_lun, target_portal, True)
+
+ vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
+ mox.IsA(str), mox.IsA(unicode)).InAnyOrder()
+
+ if setup_vif_mocks_func:
+ setup_vif_mocks_func()
+
+ if CONF.hyperv.enable_instance_metrics_collection:
+ vmutils.VMUtils.enable_vm_metrics_collection(
+ mox.Func(self._check_vm_name))
+
+ vmutils.VMUtils.get_vm_serial_port_connection(
+ mox.IsA(str), update_connection=mox.IsA(str))
+
+ def _set_vm_name(self, vm_name):
+ self._test_vm_name = vm_name
+
+ def _check_vm_name(self, vm_name):
+ return vm_name == self._test_vm_name
+
+ def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'check_admin_permissions')
+ m = vmutils.VMUtils.check_admin_permissions()
+ if admin_permissions:
+ m.AndReturn(None)
+ else:
+ m.AndRaise(vmutils.HyperVAuthorizationException(_(
+ 'Simulated failure')))
+
+ def _setup_log_vm_output_mocks(self):
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
+ ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
+ units.Mi).start()
+
+ def _setup_delete_vm_log_mocks(self):
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
+ fileutils.delete_if_exists(mox.IsA(str))
+ fileutils.delete_if_exists(mox.IsA(str))
+
+ def _setup_get_cached_image_mocks(self, cow=True,
+ vhd_format=constants.DISK_FORMAT_VHD):
+ m = vhdutils.VHDUtils.get_vhd_format(
+ mox.Func(self._check_img_path))
+ m.AndReturn(vhd_format)
+
+ def check_img_path_with_ext(image_path):
+ return image_path == self._fetched_image + '.' + vhd_format.lower()
+
+ fake.PathUtils.rename(mox.Func(self._check_img_path),
+ mox.Func(check_img_path_with_ext))
+
+ if cow and vhd_format == constants.DISK_FORMAT_VHD:
+ m = vhdutils.VHDUtils.get_vhd_info(
+ mox.Func(check_img_path_with_ext))
+ m.AndReturn({'MaxInternalSize': 1024})
+
+ fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
+
+ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
+ with_exception=False,
+ block_device_info=None,
+ boot_from_volume=False,
+ config_drive=False,
+ use_cdrom=False,
+ admin_permissions=True,
+ vhd_format=constants.DISK_FORMAT_VHD,
+ ephemeral_storage=False):
+ m = vmutils.VMUtils.vm_exists(mox.IsA(str))
+ m.WithSideEffects(self._set_vm_name).AndReturn(False)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ if block_device_info:
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
+ 'fake_root_device_name', block_device_info)
+ m.AndReturn(boot_from_volume)
+
+ if not boot_from_volume:
+ m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
+ m.AndReturn(self._test_instance_dir)
+
+ self._setup_get_cached_image_mocks(cow, vhd_format)
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
+ 'Type': 2})
+
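+ # With CoW images a differencing disk is created on top of the cached
+ # base image; otherwise the base image is copied and resized.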
+ if cow:
+ m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
+ m.AndReturn(vhd_format)
+ if vhd_format == constants.DISK_FORMAT_VHD:
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str))
+ else:
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int))
+ else:
+ fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
+
+ self._setup_check_admin_permissions_mocks(
+ admin_permissions=admin_permissions)
+ if ephemeral_storage:
+ m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
+ m.AndReturn(self._test_instance_dir)
+ vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
+ mox.IsA(str))
+
+ self._setup_create_instance_mocks(setup_vif_mocks_func,
+ boot_from_volume,
+ block_device_info,
+ ephemeral_storage=ephemeral_storage)
+
+ if config_drive and not with_exception:
+ self._setup_spawn_config_drive_mocks(use_cdrom)
+
+ # TODO(alexpilotti) Based on where the exception is thrown
+ # some of the above mock calls need to be skipped
+ if with_exception:
+ self._setup_destroy_mocks()
+ else:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ def _test_spawn_instance(self, cow=True,
+ expected_ide_disks=1,
+ expected_ide_dvds=0,
+ setup_vif_mocks_func=None,
+ with_exception=False,
+ config_drive=False,
+ use_cdrom=False,
+ admin_permissions=True,
+ vhd_format=constants.DISK_FORMAT_VHD,
+ ephemeral_storage=False):
+ self._setup_spawn_instance_mocks(cow,
+ setup_vif_mocks_func,
+ with_exception,
+ config_drive=config_drive,
+ use_cdrom=use_cdrom,
+ admin_permissions=admin_permissions,
+ vhd_format=vhd_format,
+ ephemeral_storage=ephemeral_storage)
+
+ self._mox.ReplayAll()
+ self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_ide_disks), expected_ide_disks)
+ self.assertEqual(len(self._instance_ide_dvds), expected_ide_dvds)
+
+ vhd_path = os.path.join(self._test_instance_dir, 'root.' +
+ vhd_format.lower())
+ self.assertEqual(vhd_path, self._instance_ide_disks[0])
+
+ def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
+
+ def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
+ fake_mounted_disk, fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ volumeutils.VolumeUtils.login_storage_target(target_lun,
+ target_iqn,
+ target_portal)
+
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
+ target_portal=None, boot_from_volume=False):
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ fake_controller_path = 'fake_scsi_controller_path'
+ self._mox.StubOutWithMock(self._conn._volumeops,
+ '_get_free_controller_slot')
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
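+ # Boot volumes are attached to the IDE controller; other volumes go
+ # on the SCSI controller.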
+ if boot_from_volume:
+ m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
+ m.AndReturn(fake_controller_path)
+ fake_free_slot = 0
+ else:
+ m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
+ m.AndReturn(fake_controller_path)
+
+ fake_free_slot = 1
+ m = self._conn._volumeops._get_free_controller_slot(
+ fake_controller_path)
+ m.AndReturn(fake_free_slot)
+
+ m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
+ fake_controller_path,
+ fake_free_slot,
+ fake_mounted_disk)
+ m.WithSideEffects(self._add_volume_disk)
+
+ def _test_util_class_version(self, v1_class, v2_class,
+ get_instance_action, is_hyperv_2012,
+ force_v1_flag, force_utils_v1):
+ self._check_min_windows_version_satisfied = is_hyperv_2012
+ CONF.set_override(force_v1_flag, force_utils_v1, 'hyperv')
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ instance = get_instance_action()
+ is_v1 = isinstance(instance, v1_class)
+ # v2_class can inherit from v1_class
+ is_v2 = isinstance(instance, v2_class)
+
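+ # The V2 class is expected on Hyper-V 2012 unless V1 is forced;
+ # otherwise the V1 class must be returned.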
+ self.assertTrue((is_hyperv_2012 and not force_utils_v1) ^
+ (is_v1 and not is_v2))
+
+ def test_volumeutils_version_hyperv_2012(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ True, 'force_volumeutils_v1', False)
+
+ def test_volumeutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ True, 'force_volumeutils_v1', True)
+
+ def test_volumeutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ False, 'force_volumeutils_v1', False)
+
+ def test_vmutils_version_hyperv_2012(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_vmutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_vmutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_vhdutils_version_hyperv_2012(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_vhdutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_vhdutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_networkutils_version_hyperv_2012(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_networkutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_networkutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_attach_volume(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ mount_point = '/dev/sdc'
+
+ self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self._conn.attach_volume(None, connection_info, instance_data,
+ mount_point)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_volume_disks), 1)
+
+ def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
+ target_lun, target_portal=None,
+ boot_from_volume=False):
+ fake_mounted_disk = "fake_mounted disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_logout_storage_target(target_iqn)
+
+ def test_attach_volume_logout(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ mount_point = '/dev/sdc'
+
+ self._mock_attach_volume_target_logout(instance_data['name'],
+ target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
+ None, connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ def test_attach_volume_connection_error(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ mount_point = '/dev/sdc'
+
+ def fake_login_storage_target(connection_info):
+ raise vmutils.HyperVException('Fake connection exception')
+
+ self.stubs.Set(self._conn._volumeops, '_login_storage_target',
+ fake_login_storage_target)
+ self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
+ None, connection_info, instance_data, mount_point)
+
+ def _mock_detach_volume(self, target_iqn, target_lun,
+ other_luns_available=False):
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
+
+ vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
+
+ self._mock_logout_storage_target(target_iqn, other_luns_available)
+
+ def _mock_logout_storage_target(self, target_iqn,
+ other_luns_available=False):
+
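+ # The iSCSI target is logged out only when this is the last LUN
+ # exposed through it.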
+ m = volumeutils.VolumeUtils.get_target_lun_count(target_iqn)
+ m.AndReturn(1 + int(other_luns_available))
+
+ if not other_luns_available:
+ volumeutils.VolumeUtils.logout_storage_target(target_iqn)
+
+ def _test_detach_volume(self, other_luns_available=False):
+ instance_data = self._get_instance_data()
+ self.assertIn('name', instance_data)
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ self.assertIn('target_portal', data)
+
+ mount_point = '/dev/sdc'
+
+ self._mock_detach_volume(target_iqn, target_lun, other_luns_available)
+ self._mox.ReplayAll()
+ self._conn.detach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ def test_detach_volume(self):
+ self._test_detach_volume()
+
+ def test_detach_volume_multiple_luns_per_target(self):
+ # The iSCSI target should not be disconnected in this case.
+ self._test_detach_volume(other_luns_available=True)
+
+ def test_boot_from_volume(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ self._setup_spawn_instance_mocks(cow=False,
+ block_device_info=block_device_info,
+ boot_from_volume=True)
+
+ self._mox.ReplayAll()
+ self._spawn_instance(False, block_device_info)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_volume_disks), 1)
+
+ def test_get_volume_connector(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+
+ fake_my_ip = "fake_ip"
+ fake_host = "fake_host"
+ fake_initiator = "fake_initiator"
+
+ self.flags(my_ip=fake_my_ip)
+ self.flags(host=fake_host)
+
+ m = volumeutils.VolumeUtils.get_iscsi_initiator()
+ m.AndReturn(fake_initiator)
+
+ self._mox.ReplayAll()
+ data = self._conn.get_volume_connector(instance)
+ self._mox.VerifyAll()
+
+ self.assertEqual(fake_my_ip, data.get('ip'))
+ self.assertEqual(fake_host, data.get('host'))
+ self.assertEqual(fake_initiator, data.get('initiator'))
+
+ def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
+ copy_exception=False,
+ size_exception=False):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ instance['root_gb'] = 10
+
+ fake_local_ip = '10.0.0.1'
+ if same_host:
+ fake_dest_ip = fake_local_ip
+ else:
+ fake_dest_ip = '10.0.0.2'
+
+ if size_exception:
+ flavor = 'm1.tiny'
+ else:
+ flavor = 'm1.small'
+
+ flavor = db.flavor_get_by_name(self._context, flavor)
+
+ if not size_exception:
+ fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
+ fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
+
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ self._setup_delete_vm_log_mocks()
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([fake_root_vhd_path], []))
+
+ m = hostutils.HostUtils.get_local_ips()
+ m.AndReturn([fake_local_ip])
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(
+ instance['name'], remove_dir=True)
+ m.AndReturn(fake_revert_path)
+
+ if same_host:
+ fake.PathUtils.makedirs(mox.IsA(str))
+
+ m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
+ if copy_exception:
+ m.AndRaise(shutil.Error('Simulated copy error'))
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ mox.IsA(str),
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+ else:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+ destroy_disks = True
+ if same_host:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+ destroy_disks = False
+
+ self._setup_destroy_mocks(False)
+
+ if destroy_disks:
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ mox.IsA(str),
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ return (instance, fake_dest_ip, network_info, flavor)
+
+ def test_migrate_disk_and_power_off(self):
+ (instance,
+ fake_dest_ip,
+ network_info,
+ flavor) = self._setup_test_migrate_disk_and_power_off_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, flavor,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_same_host(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ same_host=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, flavor,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_copy_exception(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ copy_exception=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
+ self._context, instance, fake_dest_ip,
+ flavor, network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ size_exception=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._conn.migrate_disk_and_power_off,
+ self._context, instance, fake_dest_ip,
+ flavor, network_info)
+ self._mox.VerifyAll()
+
+ def _mock_attach_config_drive(self, instance, config_drive_format):
+ instance['config_drive'] = True
+ self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
+ m = fake.PathUtils.lookup_configdrive_path(
+ mox.Func(self._check_instance_name))
+
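+ # Only formats present in DISK_FORMAT_MAP resolve to a config drive
+ # path; unknown formats return None.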
+ if config_drive_format in constants.DISK_FORMAT_MAP:
+ m.AndReturn(self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ else:
+ m.AndReturn(None)
+
+ m = vmutils.VMUtils.attach_ide_drive(
+ mox.Func(self._check_instance_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ def _verify_attach_config_drive(self, config_drive_format):
+ if config_drive_format == constants.IDE_DISK_FORMAT.lower():
+ self.assertEqual(self._instance_ide_disks[1],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
+ self.assertEqual(self._instance_ide_dvds[0],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+
+ def _test_finish_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ instance['system_metadata'] = {}
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ self._mox.StubOutWithMock(fake.PathUtils, 'exists')
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
+ instance["image_ref"]))
+
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'ParentPath': fake_parent_vhd_path,
+ 'MaxInternalSize': 1})
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+
+ vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
+
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'MaxInternalSize': 1024})
+
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ if ephemeral_storage:
+ m.AndReturn(self._test_instance_dir)
+ else:
+ m.AndReturn(None)
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False,
+ ephemeral_storage=ephemeral_storage)
+
+ if power_on:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
+ self._mox.ReplayAll()
+ self._conn.finish_migration(self._context, None, instance, "",
+ network_info, None, False, None, power_on)
+ self._mox.VerifyAll()
+
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
+ def test_finish_migration_with_ephemeral_storage(self):
+ self._test_finish_migration(False, ephemeral_storage=True)
+
+ def test_finish_migration_attach_config_drive_iso(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_migration_attach_config_drive_vhd(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_confirm_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+ self._mox.ReplayAll()
+ self._conn.confirm_migration(None, instance, network_info)
+ self._mox.VerifyAll()
+
+ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
+ instance['name'])
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
+ m.AndReturn(fake_revert_path)
+ fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ if ephemeral_storage:
+ m.AndReturn(self._test_instance_dir)
+ else:
+ m.AndReturn(None)
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False,
+ ephemeral_storage=ephemeral_storage)
+
+ if power_on:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
+ self._mox.ReplayAll()
+ self._conn.finish_revert_migration(self._context, instance,
+ network_info, None,
+ power_on)
+ self._mox.VerifyAll()
+
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def test_spawn_no_admin_permissions(self):
+ self.assertRaises(vmutils.HyperVAuthorizationException,
+ self._test_spawn_instance,
+ with_exception=True,
+ admin_permissions=False)
+
+ def test_finish_revert_migration_with_ephemeral_storage(self):
+ self._test_finish_revert_migration(False, ephemeral_storage=True)
+
+ def test_finish_revert_migration_attach_config_drive_iso(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_revert_migration_attach_config_drive_vhd(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.plug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.unplug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_rollback_live_migration_at_destination(self):
+ with mock.patch.object(self._conn, "destroy") as mock_destroy:
+ self._conn.rollback_live_migration_at_destination(self._context,
+ self._test_spawn_instance, [], None)
+ mock_destroy.assert_called_once_with(self._context,
+ self._test_spawn_instance, [], None)
+
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self._conn.refresh_instance_security_rules,
+ instance=None)
+
+ def test_get_rdp_console(self):
+ self.flags(my_ip="192.168.1.1")
+
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+
+ fake_port = 9999
+ fake_vm_id = "fake_vm_id"
+
+ m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
+ m.AndReturn(fake_port)
+
+ m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
+ m.AndReturn(fake_vm_id)
+
+ self._mox.ReplayAll()
+ connect_info = self._conn.get_rdp_console(self._context, instance)
+ self._mox.VerifyAll()
+
+ self.assertEqual(CONF.my_ip, connect_info.host)
+ self.assertEqual(fake_port, connect_info.port)
+ self.assertEqual(fake_vm_id, connect_info.internal_access_path)
+
+
+class VolumeOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for VolumeOps class."""
+
+ def setUp(self):
+ super(VolumeOpsTestCase, self).setUp()
+ self.volumeops = volumeops.VolumeOps()
+
+ def test_get_mounted_disk_from_lun(self):
+ with contextlib.nested(
+ mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_mounted_disk_by_drive_number')
+ ) as (mock_get_device_number_for_target,
+ mock_get_mounted_disk_by_drive_number):
+
+ mock_get_device_number_for_target.return_value = 0
+ mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
+ disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+ self.assertEqual(disk, 'disk_path')
+
+ def test_get_mounted_disk_from_lun_failure(self):
+ self.flags(mounted_disk_query_retry_count=1, group='hyperv')
+
+ with mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target') as m_device_num:
+ m_device_num.side_effect = [None, -1]
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
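+ # With a single retry configured and no valid device number returned,
+ # the lookup must give up and raise NotFound.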
+ for attempt in xrange(1):
+ self.assertRaises(exception.NotFound,
+ self.volumeops._get_mounted_disk_from_lun,
+ target_iqn, target_lun)
+
+ def test_get_free_controller_slot_exception(self):
+ fake_drive = mock.MagicMock()
+ type(fake_drive).AddressOnParent = mock.PropertyMock(
+ side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
+ fake_scsi_controller_path = 'fake_scsi_controller_path'
+
+ with mock.patch.object(self.volumeops._vmutils,
+ 'get_attached_disks') as fake_get_attached_disks:
+ fake_get_attached_disks.return_value = (
+ [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
+ self.assertRaises(vmutils.HyperVException,
+ self.volumeops._get_free_controller_slot,
+ fake_scsi_controller_path)
+
+ def test_fix_instance_volume_disk_paths(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ with contextlib.nested(
+ mock.patch.object(self.volumeops,
+ '_get_mounted_disk_from_lun'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_vm_scsi_controller'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'set_disk_host_resource'),
+ mock.patch.object(self.volumeops,
+ 'ebs_root_in_block_devices')
+ ) as (mock_get_mounted_disk_from_lun,
+ mock_get_vm_scsi_controller,
+ mock_set_disk_host_resource,
+ mock_ebs_in_block_devices):
+
+ mock_ebs_in_block_devices.return_value = False
+ mock_get_mounted_disk_from_lun.return_value = "fake_mounted_path"
+ mock_set_disk_host_resource.return_value = "fake_controller_path"
+
+ self.volumeops.fix_instance_volume_disk_paths(
+ "test_vm_name",
+ block_device_info)
+
+ mock_get_mounted_disk_from_lun.assert_called_with(
+ 'iqn.2010-10.org.openstack:volume-' + self._volume_id, 1, True)
+ mock_get_vm_scsi_controller.assert_called_with("test_vm_name")
+ mock_set_disk_host_resource("test_vm_name", "fake_controller_path",
+ 0, "fake_mounted_path")
+
+
+class HostOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for the Hyper-V hostops class."""
+
+ def setUp(self):
+ self._hostops = hostops.HostOps()
+ self._hostops._hostutils = mock.MagicMock()
+ self._hostops.time = mock.MagicMock()
+ super(HostOpsTestCase, self).setUp()
+
+ @mock.patch('nova.virt.hyperv.hostops.time')
+ def test_host_uptime(self, mock_time):
+ self._hostops._hostutils.get_host_tick_count64.return_value = 100
+ mock_time.strftime.return_value = "01:01:01"
+
+ result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
+ str(datetime.timedelta(
+ milliseconds=long(100))))
+ actual_uptime = self._hostops.get_host_uptime()
+ self.assertEqual(result_uptime, actual_uptime)
diff --git a/nova/tests/unit/virt/hyperv/test_ioutils.py b/nova/tests/unit/virt/hyperv/test_ioutils.py
new file mode 100644
index 0000000000..2f12450a46
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_ioutils.py
@@ -0,0 +1,61 @@
+# Copyright 2014 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+import os
+
+from nova import test
+from nova.virt.hyperv import ioutils
+
+
+class IOThreadTestCase(test.NoDBTestCase):
+ _FAKE_SRC = r'fake_source_file'
+ _FAKE_DEST = r'fake_dest_file'
+ _FAKE_MAX_BYTES = 1
+
+ def setUp(self):
+ self._iothread = ioutils.IOThread(
+ self._FAKE_SRC, self._FAKE_DEST, self._FAKE_MAX_BYTES)
+ super(IOThreadTestCase, self).setUp()
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('os.rename')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.remove')
+ def test_copy(self, fake_remove, fake_exists, fake_rename, fake_open):
+ fake_data = 'a'
+ fake_src = mock.Mock()
+ fake_dest = mock.Mock()
+
+ fake_src.read.return_value = fake_data
+ fake_dest.tell.return_value = 0
+ fake_exists.return_value = True
+
+ mock_context_manager = mock.MagicMock()
+ fake_open.return_value = mock_context_manager
+ mock_context_manager.__enter__.side_effect = [fake_src, fake_dest]
+ self._iothread._stopped.isSet = mock.Mock(side_effect=[False, True])
+
+ self._iothread._copy(self._FAKE_SRC, self._FAKE_DEST)
+
+ fake_dest.seek.assert_called_once_with(0, os.SEEK_END)
+ fake_dest.write.assert_called_once_with(fake_data)
+ fake_dest.close.assert_called_once_with()
+ fake_rename.assert_called_once_with(
+ self._iothread._dest, self._iothread._dest_archive)
+ fake_remove.assert_called_once_with(
+ self._iothread._dest_archive)
+ self.assertEqual(3, fake_open.call_count)
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
new file mode 100644
index 0000000000..8cda2ccd48
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -0,0 +1,79 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import migrationops
+from nova.virt.hyperv import vmutils
+
+
+class MigrationOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V MigrationOps class."""
+
+ _FAKE_TIMEOUT = 10
+ _FAKE_RETRY_INTERVAL = 5
+
+ def setUp(self):
+ super(MigrationOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory will check the host OS version via get_hostutils,
+ # in order to return the proper Utils Class, so it must be mocked.
+ patched_func = mock.patch.object(migrationops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._migrationops = migrationops.MigrationOps()
+ self._migrationops._vmops = mock.MagicMock()
+ self._migrationops._vmutils = mock.MagicMock()
+
+ def test_check_and_attach_config_drive_unknown_path(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ expected_attrs=['system_metadata'])
+ instance.config_drive = 'True'
+ self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
+ return_value=None)
+ self.assertRaises(vmutils.HyperVException,
+ self._migrationops._check_and_attach_config_drive,
+ instance)
+
+ @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
+ @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
+ def test_migrate_disk_and_power_off(self, mock_check_flavor,
+ mock_migrate_disk_files):
+ instance = fake_instance.fake_instance_obj(self.context)
+ flavor = mock.MagicMock()
+ network_info = mock.MagicMock()
+
+ disk_files = [mock.MagicMock()]
+ volume_drives = [mock.MagicMock()]
+
+ mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
+ mock_get_vm_st_path.return_value = (disk_files, volume_drives)
+
+ self._migrationops.migrate_disk_and_power_off(
+ self.context, instance, mock.sentinel.FAKE_DEST, flavor,
+ network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+
+ mock_check_flavor.assert_called_once_with(instance, flavor)
+ self._migrationops._vmops.power_off.assert_called_once_with(
+ instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+ mock_get_vm_st_path.assert_called_once_with(instance.name)
+ mock_migrate_disk_files.assert_called_once_with(
+ instance.name, disk_files, mock.sentinel.FAKE_DEST)
+ self._migrationops._vmops.destroy.assert_called_once_with(
+ instance, destroy_disks=False)
diff --git a/nova/tests/unit/virt/hyperv/test_networkutils.py b/nova/tests/unit/virt/hyperv/test_networkutils.py
new file mode 100644
index 0000000000..281df29833
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_networkutils.py
@@ -0,0 +1,82 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import vmutils
+
+
+class NetworkUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V NetworkUtils class."""
+
+ _FAKE_PORT = {'Name': mock.sentinel.FAKE_PORT_NAME}
+ _FAKE_RET_VALUE = 0
+
+ _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualSwitch'
+
+ def setUp(self):
+ self._networkutils = networkutils.NetworkUtils()
+ self._networkutils._conn = mock.MagicMock()
+
+ super(NetworkUtilsTestCase, self).setUp()
+
+ def test_get_external_vswitch(self):
+ mock_vswitch = mock.MagicMock()
+ mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
+ getattr(self._networkutils._conn,
+ self._MSVM_VIRTUAL_SWITCH).return_value = [mock_vswitch]
+
+ switch_path = self._networkutils.get_external_vswitch(
+ mock.sentinel.FAKE_VSWITCH_NAME)
+
+ self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
+
+ def test_get_external_vswitch_not_found(self):
+ self._networkutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
+
+ self.assertRaises(vmutils.HyperVException,
+ self._networkutils.get_external_vswitch,
+ mock.sentinel.FAKE_VSWITCH_NAME)
+
+ def test_get_external_vswitch_no_name(self):
+ mock_vswitch = mock.MagicMock()
+ mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
+
+ mock_ext_port = self._networkutils._conn.Msvm_ExternalEthernetPort()[0]
+ self._prepare_external_port(mock_vswitch, mock_ext_port)
+
+ switch_path = self._networkutils.get_external_vswitch(None)
+ self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
+
+ def _prepare_external_port(self, mock_vswitch, mock_ext_port):
+ mock_lep = mock_ext_port.associators()[0]
+ mock_lep.associators.return_value = [mock_vswitch]
+
+ def test_create_vswitch_port(self):
+ svc = self._networkutils._conn.Msvm_VirtualSwitchManagementService()[0]
+ svc.CreateSwitchPort.return_value = (
+ self._FAKE_PORT, self._FAKE_RET_VALUE)
+
+ port = self._networkutils.create_vswitch_port(
+ mock.sentinel.FAKE_VSWITCH_PATH, mock.sentinel.FAKE_PORT_NAME)
+
+ svc.CreateSwitchPort.assert_called_once_with(
+ Name=mock.ANY, FriendlyName=mock.sentinel.FAKE_PORT_NAME,
+ ScopeOfResidence="", VirtualSwitch=mock.sentinel.FAKE_VSWITCH_PATH)
+ self.assertEqual(self._FAKE_PORT, port)
+
+ def test_vswitch_port_needed(self):
+ self.assertTrue(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/unit/virt/hyperv/test_networkutilsv2.py b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
new file mode 100644
index 0000000000..1038e88682
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_networkutils
+from nova.virt.hyperv import networkutilsv2
+
+
+class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
+ """Unit tests for the Hyper-V NetworkUtilsV2 class."""
+
+ _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
+
+ def setUp(self):
+ super(NetworkUtilsV2TestCase, self).setUp()
+ self._networkutils = networkutilsv2.NetworkUtilsV2()
+ self._networkutils._conn = mock.MagicMock()
+
+ def _prepare_external_port(self, mock_vswitch, mock_ext_port):
+ mock_lep = mock_ext_port.associators()[0]
+ mock_lep1 = mock_lep.associators()[0]
+ mock_esw = mock_lep1.associators()[0]
+ mock_esw.associators.return_value = [mock_vswitch]
+
+ def test_create_vswitch_port(self):
+ self.assertRaises(
+ NotImplementedError,
+ self._networkutils.create_vswitch_port,
+ mock.sentinel.FAKE_VSWITCH_PATH,
+ mock.sentinel.FAKE_PORT_NAME)
+
+ def test_vswitch_port_needed(self):
+ self.assertFalse(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
new file mode 100644
index 0000000000..0ded84ec6b
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
@@ -0,0 +1,58 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+
+
+class PathUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V PathUtils class."""
+
+ def setUp(self):
+ self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
+ self.fake_instance_name = 'fake_instance_name'
+ self._pathutils = pathutils.PathUtils()
+ super(PathUtilsTestCase, self).setUp()
+
+ def _mock_lookup_configdrive_path(self, ext):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+
+ def mock_exists(*args, **kwargs):
+ path = args[0]
+ return path[(path.rfind('.') + 1):] == ext
+ self._pathutils.exists = mock_exists
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ return configdrive_path
+
+ def test_lookup_configdrive_path(self):
+ for format_ext in constants.DISK_FORMAT_MAP:
+ configdrive_path = self._mock_lookup_configdrive_path(format_ext)
+ fake_path = os.path.join(self.fake_instance_dir,
+ 'configdrive.' + format_ext)
+ self.assertEqual(configdrive_path, fake_path)
+
+ def test_lookup_configdrive_path_non_exist(self):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+ self._pathutils.exists = mock.MagicMock(return_value=False)
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ self.assertIsNone(configdrive_path)
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
new file mode 100644
index 0000000000..98d4484b61
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
@@ -0,0 +1,28 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt.hyperv import rdpconsoleutils
+
+
+class RDPConsoleUtilsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
+ super(RDPConsoleUtilsTestCase, self).setUp()
+
+ def test_get_rdp_console_port(self):
+ listener_port = self._rdpconsoleutils.get_rdp_console_port()
+
+ self.assertEqual(self._rdpconsoleutils._DEFAULT_HYPERV_RDP_PORT,
+ listener_port)
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
new file mode 100644
index 0000000000..bcdfaf92f0
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
@@ -0,0 +1,37 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import rdpconsoleutilsv2
+
+
+class RDPConsoleUtilsV2TestCase(test.NoDBTestCase):
+ _FAKE_RDP_PORT = 1000
+
+ def setUp(self):
+ self._rdpconsoleutils = rdpconsoleutilsv2.RDPConsoleUtilsV2()
+ self._rdpconsoleutils._conn = mock.MagicMock()
+
+ super(RDPConsoleUtilsV2TestCase, self).setUp()
+
+ def test_get_rdp_console_port(self):
+ conn = self._rdpconsoleutils._conn
+ mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0]
+ mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT
+
+ listener_port = self._rdpconsoleutils.get_rdp_console_port()
+
+ self.assertEqual(self._FAKE_RDP_PORT, listener_port)
diff --git a/nova/tests/unit/virt/hyperv/test_utilsfactory.py b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
new file mode 100644
index 0000000000..77b8a92a8e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
@@ -0,0 +1,57 @@
+# Copyright 2014 Cloudbase Solutions SRL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the Hyper-V utils factory.
+"""
+
+import mock
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+
+CONF = cfg.CONF
+
+
+class TestHyperVUtilsFactory(test.NoDBTestCase):
+ def test_get_vmutils_force_v1_and_min_version(self):
+ self._test_returned_class(None, True, True)
+
+ def test_get_vmutils_v2(self):
+ self._test_returned_class(vmutilsv2.VMUtilsV2, False, True)
+
+ def test_get_vmutils_v2_r2(self):
+ self._test_returned_class(vmutils.VMUtils, False, False)
+
+ def test_get_vmutils_force_v1_and_not_min_version(self):
+ self._test_returned_class(vmutils.VMUtils, True, False)
+
+ def _test_returned_class(self, expected_class, force_v1, os_supports_v2):
+ CONF.set_override('force_hyperv_utils_v1', force_v1, 'hyperv')
+ with mock.patch.object(
+ hostutils.HostUtils,
+ 'check_min_windows_version') as mock_check_min_windows_version:
+ mock_check_min_windows_version.return_value = os_supports_v2
+
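+ # Forcing the V1 utils on an OS that supports V2 is rejected by the
+ # factory.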
+ if os_supports_v2 and force_v1:
+ self.assertRaises(vmutils.HyperVException,
+ utilsfactory.get_vmutils)
+ else:
+ actual_class = type(utilsfactory.get_vmutils())
+ self.assertEqual(actual_class, expected_class)
diff --git a/nova/tests/unit/virt/hyperv/test_vhdutils.py b/nova/tests/unit/virt/hyperv/test_vhdutils.py
new file mode 100644
index 0000000000..e41353329a
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vhdutils.py
@@ -0,0 +1,161 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vmutils
+
+
+class VHDUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VHDUtils class."""
+
+ _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
+ _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
+ _FAKE_FORMAT = 3
+ _FAKE_MAK_INTERNAL_SIZE = 1000
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+
+ def setUp(self):
+ self._vhdutils = vhdutils.VHDUtils()
+ self._vhdutils._conn = mock.MagicMock()
+ self._vhdutils._vmutils = mock.MagicMock()
+ super(VHDUtilsTestCase, self).setUp()
+
+ def test_create_dynamic_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateDynamicVirtualHardDisk.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE,
+ constants.DISK_FORMAT_VHD)
+
+ mock_img_svc.CreateDynamicVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+
+ def test_create_differencing_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH)
+
+ mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ ParentPath=self._FAKE_PARENT_PATH)
+
+ def test_create_differencing_vhd_with_new_size(self):
+ fake_new_size = 1024
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.create_differencing_vhd,
+ self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH,
+ fake_new_size)
+
+ def test_get_internal_vhd_size_by_file_size_fixed(self):
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 1 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_info.return_value = {'Type': constants.VHD_TYPE_FIXED}
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 1 * 1024 ** 3 - 512
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_internal_vhd_size_by_file_size_dynamic(self):
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 20 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_info.return_value = {'Type':
+ constants.VHD_TYPE_DYNAMIC}
+ vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
+ vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 20 * 1024 ** 3 - 43008
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_internal_vhd_size_by_file_size_differencing(self):
+ # For differencing images, the internal size of the parent VHD
+ # is returned.
+ vhdutil = vhdutils.VHDUtils()
+ root_vhd_size = 20 * 1024 ** 3
+ vhdutil.get_vhd_info = mock.MagicMock()
+ vhdutil.get_vhd_parent_path = mock.MagicMock()
+ vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
+ vhdutil.get_vhd_info.side_effect = [
+ {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
+
+ vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
+ vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 20 * 1024 ** 3 - 43008
+ self.assertEqual(expected_vhd_size, real_size)
+
+ def test_get_vhd_format_vhdx(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
+ create=True):
+
+ fmt = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)
+
+ def test_get_vhd_format_vhd(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHD_SIGNATURE),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ fmt = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHD, fmt)
+
+ def test_get_vhd_format_invalid_format(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data='invalid'),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ def test_get_vhd_format_zero_length_file(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=''),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 0
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ f.seek.assert_called_once_with(0, 2)
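
The get_vhd_format tests above use mock.mock_open to stand in for the module-level open() call, so the signature check never touches a real file. The same pattern in a small self-contained form (read_file_header is an invented placeholder for the code under test):

import mock


def read_file_header(path):
    # Stand-in for code under test that opens a file and inspects it.
    with open(path) as f:
        return f.read()


# Patch open in this module, exactly as the tests patch
# 'nova.virt.hyperv.vhdutils.open' with create=True.
with mock.patch('__main__.open', mock.mock_open(read_data='vhdx'),
                create=True):
    assert read_file_header('ignored.vhdx') == 'vhdx'
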
diff --git a/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
new file mode 100644
index 0000000000..a813d3bbd6
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
@@ -0,0 +1,249 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import units
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vhdutilsv2
+
+
+class VHDUtilsV2TestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VHDUtilsV2 class."""
+
+ _FAKE_VHD_PATH = "C:\\fake_path.vhdx"
+ _FAKE_PARENT_VHD_PATH = "C:\\fake_parent_path.vhdx"
+ _FAKE_FORMAT = 3
+ _FAKE_MAK_INTERNAL_SIZE = units.Gi
+ _FAKE_TYPE = 3
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+ _FAKE_VHD_FORMAT = 'vhdx'
+ _FAKE_BLOCK_SIZE = 33554432
+ _FAKE_LOG_SIZE = 1048576
+ _FAKE_LOGICAL_SECTOR_SIZE = 4096
+ _FAKE_METADATA_SIZE = 1048576
+ _FAKE_VHD_INFO = {'ParentPath': _FAKE_PARENT_VHD_PATH,
+ 'Format': _FAKE_FORMAT,
+ 'BlockSize': _FAKE_BLOCK_SIZE,
+ 'LogicalSectorSize': _FAKE_LOGICAL_SECTOR_SIZE,
+ 'Type': _FAKE_TYPE}
+
+ def setUp(self):
+ self._vhdutils = vhdutilsv2.VHDUtilsV2()
+ self._vhdutils._conn = mock.MagicMock()
+ self._vhdutils._vmutils = mock.MagicMock()
+ self._vhdutils.get_vhd_format = mock.MagicMock(
+ return_value=self._FAKE_VHD_FORMAT)
+
+ self._fake_file_handle = mock.MagicMock()
+ self._fake_vhd_info_xml = (
+ '<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">'
+ '<PROPERTY NAME="BlockSize" TYPE="uint32">'
+ '<VALUE>33554432</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Caption" TYPE="string">'
+ '<VALUE>Virtual Hard Disk Setting Data</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Description" TYPE="string">'
+ '<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="ElementName" TYPE="string">'
+ '<VALUE>fake_path.vhdx</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Format" TYPE="uint16">'
+ '<VALUE>%(format)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="InstanceID" TYPE="string">'
+ '<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">'
+ '<VALUE>512</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="MaxInternalSize" TYPE="uint64">'
+ '<VALUE>%(max_internal_size)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="ParentPath" TYPE="string">'
+ '<VALUE>%(parent_path)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Path" TYPE="string">'
+ '<VALUE>%(path)s</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">'
+ '<VALUE>4096</VALUE>'
+ '</PROPERTY>'
+ '<PROPERTY NAME="Type" TYPE="uint16">'
+ '<VALUE>%(type)s</VALUE>'
+ '</PROPERTY>'
+ '</INSTANCE>' %
+ {'path': self._FAKE_VHD_PATH,
+ 'parent_path': self._FAKE_PARENT_VHD_PATH,
+ 'format': self._FAKE_FORMAT,
+ 'max_internal_size': self._FAKE_MAK_INTERNAL_SIZE,
+ 'type': self._FAKE_TYPE})
+
+ super(VHDUtilsV2TestCase, self).setUp()
+
+ def test_get_vhd_info(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL, self._fake_vhd_info_xml)
+
+ vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
+
+ self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
+ self.assertEqual(self._FAKE_PARENT_VHD_PATH, vhd_info['ParentPath'])
+ self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
+ self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE,
+ vhd_info['MaxInternalSize'])
+ self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])
+
+ def test_create_dynamic_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+
+ self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE,
+ constants.DISK_FORMAT_VHDX)
+
+ self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
+
+ def test_create_differencing_vhd(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH,
+ 'Format': self._FAKE_FORMAT})
+
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+
+ self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_VHD_PATH)
+
+ self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
+
+ def test_reconnect_parent_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+
+ self._vhdutils._get_vhd_info_xml = mock.MagicMock(
+ return_value=self._fake_vhd_info_xml)
+
+ mock_img_svc.SetVirtualHardDiskSettingData.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_VHD_PATH)
+
+ mock_img_svc.SetVirtualHardDiskSettingData.assert_called_once_with(
+ VirtualDiskSettingData=self._fake_vhd_info_xml)
+
+ def test_resize_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
+ self._FAKE_RET_VAL)
+ self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
+ return_value=self._FAKE_MAK_INTERNAL_SIZE)
+
+ self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE)
+
+ mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+
+ mock_get = self._vhdutils.get_internal_vhd_size_by_file_size
+ mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE)
+
+ def _test_get_vhdx_internal_size(self, vhd_type):
+ self._vhdutils.get_vhd_info = mock.MagicMock()
+ self._vhdutils.get_vhd_parent_path = mock.Mock(
+ return_value=self._FAKE_PARENT_VHD_PATH)
+
+ if vhd_type == 4:
+ self._vhdutils.get_vhd_info.side_effect = [
+ {'Type': vhd_type}, self._FAKE_VHD_INFO]
+ else:
+ self._vhdutils.get_vhd_info.return_value = self._FAKE_VHD_INFO
+ self._vhdutils._get_vhdx_log_size = mock.MagicMock(
+ return_value=self._FAKE_LOG_SIZE)
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ self._vhdutils._get_vhdx_block_size = mock.MagicMock(
+ return_value=self._FAKE_BLOCK_SIZE)
+
+ file_mock = mock.MagicMock()
+ with mock.patch('__builtin__.open', file_mock):
+ internal_size = (
+ self._vhdutils.get_internal_vhd_size_by_file_size(
+ self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE))
+
+ self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
+ internal_size)
+
+ def test_get_vhdx_internal_size_dynamic(self):
+ self._test_get_vhdx_internal_size(3)
+
+ def test_get_vhdx_internal_size_differencing(self):
+ self._test_get_vhdx_internal_size(4)
+
+ def test_get_vhdx_current_header(self):
+ VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
+ fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
+ '\x02\x00\x00\x00\x00\x00\x00\x00']
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=fake_sequence_numbers)
+
+ offset = self._vhdutils._get_vhdx_current_header_offset(
+ self._fake_file_handle)
+ self.assertEqual(offset, VHDX_HEADER_OFFSETS[1])
+
+ def test_get_vhdx_metadata_size(self):
+ fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00'
+ fake_metadata_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=[fake_metadata_offset, fake_metadata_size])
+
+ metadata_size, metadata_offset = (
+ self._vhdutils._get_vhdx_metadata_size_and_offset(
+ self._fake_file_handle))
+ self.assertEqual(metadata_size, 1)
+ self.assertEqual(metadata_offset, 1)
+
+ def test_get_block_size(self):
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ fake_block_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_block_size)
+
+ block_size = self._vhdutils._get_vhdx_block_size(
+ self._fake_file_handle)
+ self.assertEqual(block_size, 1)
+
+ def test_get_log_size(self):
+ fake_current_header_offset = 64 * 1024
+ self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock(
+ return_value=fake_current_header_offset)
+ fake_log_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_log_size)
+
+ log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle)
+ self.assertEqual(log_size, 1)
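
The fake byte strings above ('\x01\x00\x00\x00', '\x02\x00\x00\x00\x00\x00\x00\x00', ...) are little-endian integers as they would appear in the VHDX header structures, which is why the assertions expect plain 1s and 2s. A quick standalone check of that encoding (Python 2 byte strings, matching the test data; independent of the code under test):

import struct

# 32-bit and 64-bit little-endian unsigned integers, as used for the fake
# block size, log size and header sequence numbers above.
assert struct.unpack('<I', '\x01\x00\x00\x00')[0] == 1
assert struct.unpack('<Q', '\x02\x00\x00\x00\x00\x00\x00\x00')[0] == 2
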
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
new file mode 100644
index 0000000000..5ec107747e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -0,0 +1,230 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import timeout as etimeout
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import vmutils
+
+
+class VMOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMOps class."""
+
+ _FAKE_TIMEOUT = 2
+
+ def __init__(self, test_case_name):
+ super(VMOpsTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory checks the host OS version via get_hostutils in order
+ # to return the proper utils class, so it must be mocked here.
+ patched_func = mock.patch.object(vmops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._vmops = vmops.VMOps()
+
+ def test_attach_config_drive(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._vmops.attach_config_drive,
+ instance, 'C:/fake_instance_dir/configdrive.xxx')
+
+ def test_reboot_hard(self):
+ self._test_reboot(vmops.REBOOT_TYPE_HARD,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = True
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_failed(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
+ mock_soft_shutdown.return_value = True
+ mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
+ instance, {}, vmops.REBOOT_TYPE_SOFT)
+
+ mock_soft_shutdown.assert_called_once_with(instance)
+ mock_power_on.assert_called_once_with(instance)
+
+ def _test_reboot(self, reboot_type, vm_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.reboot(instance, {}, reboot_type)
+ mock_set_state.assert_called_once_with(instance, vm_state)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = True
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ mock_wait_for_power_off.assert_called_once_with(
+ instance.name, self._FAKE_TIMEOUT)
+
+ self.assertTrue(result)
+
+ @mock.patch("time.sleep")
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ def test_soft_shutdown_failed(self, mock_shutdown_vm, mock_sleep):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ mock_shutdown_vm.side_effect = vmutils.HyperVException(
+ "Expected failure.")
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ self.assertFalse(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.side_effect = [False, True]
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
+
+ calls = [mock.call(instance.name, 1),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertTrue(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = False
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
+
+ calls = [mock.call(instance.name, 1.5),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertFalse(result)
+
+ def _test_power_off(self, timeout):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.power_off(instance, timeout)
+
+ mock_set_state.assert_called_once_with(
+ instance, constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_hard(self):
+ self._test_power_off(timeout=0)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_exception(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_power_off(timeout=1)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_soft_shutdown.return_value = True
+
+ self._vmops.power_off(instance, 1, 0)
+
+ mock_soft_shutdown.assert_called_once_with(
+ instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(mock_set_state.called)
+
+ def test_get_vm_state(self):
+ summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
+
+ with mock.patch.object(self._vmops._vmutils,
+ 'get_vm_summary_info') as mock_get_summary_info:
+ mock_get_summary_info.return_value = summary_info
+
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
+
+ @mock.patch.object(vmops.VMOps, '_get_vm_state')
+ def test_wait_for_power_off_true(self, mock_get_state):
+ mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
+ self.assertTrue(result)
+
+ @mock.patch.object(vmops.etimeout, "with_timeout")
+ def test_wait_for_power_off_false(self, mock_with_timeout):
+ mock_with_timeout.side_effect = etimeout.Timeout()
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(result)
+
+ @mock.patch("__builtin__.open")
+ @mock.patch("os.path.exists")
+ @mock.patch.object(pathutils.PathUtils, 'get_vm_console_log_paths')
+ def test_get_console_output_exception(self,
+ fake_get_vm_log_path,
+ fake_path_exists,
+ fake_open):
+ fake_vm = mock.MagicMock()
+
+ fake_open.side_effect = vmutils.HyperVException
+ fake_path_exists.return_value = True
+ fake_get_vm_log_path.return_value = (
+ mock.sentinel.fake_console_log_path,
+ mock.sentinel.fake_console_log_archived)
+
+ with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmops.get_console_output,
+ fake_vm)
+
+ def test_list_instance_uuids(self):
+ fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
+ with mock.patch.object(self._vmops._vmutils,
+ 'list_instance_notes') as mock_list_notes:
+ mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+
+ response = self._vmops.list_instance_uuids()
+ mock_list_notes.assert_called_once_with()
+
+ self.assertEqual(response, [fake_uuid])
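
Taken together, the soft-shutdown tests above describe a retry loop: issue the guest shutdown once, then poll for power-off in retry_interval slices until the overall timeout is spent, returning False if the shutdown call raises or the timeout runs out. A hedged sketch of that behaviour (it mirrors only the call pattern the mocks assert; it is not the actual VMOps implementation, and the function name is invented):

def soft_shutdown_sketch(vmutils_obj, wait_for_power_off, instance_name,
                         timeout, retry_interval):
    # Illustrative only: reproduces the sequence of calls the tests expect.
    try:
        vmutils_obj.soft_shutdown_vm(instance_name)
    except Exception:  # the tests use vmutils.HyperVException here
        return False

    while timeout > 0:
        wait_time = min(retry_interval, timeout)
        if wait_for_power_off(instance_name, wait_time):
            return True
        timeout -= wait_time
    return False
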
diff --git a/nova/tests/unit/virt/hyperv/test_vmutils.py b/nova/tests/unit/virt/hyperv/test_vmutils.py
new file mode 100644
index 0000000000..7c54f273ab
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmutils.py
@@ -0,0 +1,668 @@
+# Copyright 2014 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+
+class VMUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMUtils class."""
+
+ _FAKE_VM_NAME = 'fake_vm'
+ _FAKE_MEMORY_MB = 2
+ _FAKE_VCPUS_NUM = 4
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+ _FAKE_RET_VAL_BAD = -1
+ _FAKE_CTRL_PATH = 'fake_ctrl_path'
+ _FAKE_CTRL_ADDR = 0
+ _FAKE_DRIVE_ADDR = 0
+ _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
+ _FAKE_VM_PATH = "fake_vm_path"
+ _FAKE_VHD_PATH = "fake_vhd_path"
+ _FAKE_DVD_PATH = "fake_dvd_path"
+ _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
+ _FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6"
+ _FAKE_INSTANCE = {"name": _FAKE_VM_NAME,
+ "uuid": _FAKE_VM_UUID}
+ _FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
+ _FAKE_RES_DATA = "fake_res_data"
+ _FAKE_HOST_RESOURCE = "fake_host_resource"
+ _FAKE_CLASS = "FakeClass"
+ _FAKE_RES_PATH = "fake_res_path"
+ _FAKE_RES_NAME = 'fake_res_name'
+ _FAKE_ADDRESS = "fake_address"
+ _FAKE_JOB_STATUS_DONE = 7
+ _FAKE_JOB_STATUS_BAD = -1
+ _FAKE_JOB_DESCRIPTION = "fake_job_description"
+ _FAKE_ERROR = "fake_error"
+ _FAKE_ELAPSED_TIME = 0
+ _CONCRETE_JOB = "Msvm_ConcreteJob"
+ _FAKE_DYNAMIC_MEMORY_RATIO = 1.0
+
+ _FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
+ 'EnabledState': 2,
+ 'MemoryUsage': 2,
+ 'UpTime': 1}
+
+ _DEFINE_SYSTEM = 'DefineVirtualSystem'
+ _DESTROY_SYSTEM = 'DestroyVirtualSystem'
+ _DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
+ _ADD_RESOURCE = 'AddVirtualSystemResources'
+ _REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
+ _SETTING_TYPE = 'SettingType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 3
+
+ def setUp(self):
+ self._vmutils = vmutils.VMUtils()
+ self._vmutils._conn = mock.MagicMock()
+
+ super(VMUtilsTestCase, self).setUp()
+
+ def test_enable_vm_metrics_collection(self):
+ self.assertRaises(NotImplementedError,
+ self._vmutils.enable_vm_metrics_collection,
+ self._FAKE_VM_NAME)
+
+ def test_get_vm_summary_info(self):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ mock_summary = mock.MagicMock()
+ mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
+ [mock_summary])
+
+ for (key, val) in self._FAKE_SUMMARY_INFO.items():
+ setattr(mock_summary, key, val)
+
+ summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
+
+ def _lookup_vm(self):
+ mock_vm = mock.MagicMock()
+ self._vmutils._lookup_vm_check = mock.MagicMock(
+ return_value=mock_vm)
+ mock_vm.path_.return_value = self._FAKE_VM_PATH
+ return mock_vm
+
+ def test_lookup_vm_ok(self):
+ mock_vm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+ vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
+ self.assertEqual(mock_vm, vm)
+
+ def test_lookup_vm_multiple(self):
+ mockvm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
+ def test_lookup_vm_none(self):
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = []
+ self.assertRaises(exception.NotFound,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
+ def test_set_vm_memory_static(self):
+ self._test_set_vm_memory_dynamic(1.0)
+
+ def test_set_vm_memory_dynamic(self):
+ self._test_set_vm_memory_dynamic(2.0)
+
+ def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
+ mock_vm = self._lookup_vm()
+
+ mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
+ mock_s.SystemType = 3
+
+ mock_vmsetting = mock.MagicMock()
+ mock_vmsetting.associators.return_value = [mock_s]
+
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+
+ self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
+ self._FAKE_MEMORY_MB,
+ dynamic_memory_ratio)
+
+ self._vmutils._modify_virt_resource.assert_called_with(
+ mock_s, self._FAKE_VM_PATH)
+
+ if dynamic_memory_ratio > 1:
+ self.assertTrue(mock_s.DynamicMemoryEnabled)
+ else:
+ self.assertFalse(mock_s.DynamicMemoryEnabled)
+
+ def test_soft_shutdown_vm(self):
+ mock_vm = self._lookup_vm()
+ mock_shutdown = mock.MagicMock()
+ mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
+ mock_vm.associators.return_value = [mock_shutdown]
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+
+ mock_shutdown.InitiateShutdown.assert_called_once_with(
+ Force=False, Reason=mock.ANY)
+ mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
+
+ def test_soft_shutdown_vm_no_component(self):
+ mock_vm = self._lookup_vm()
+ mock_vm.associators.return_value = []
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+ self.assertFalse(mock_check.called)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_get_vm_storage_paths(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_rasds = self._create_mock_disks()
+ mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
+
+ storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
+ (disk_files, volume_drives) = storage
+
+ self.assertEqual([self._FAKE_VHD_PATH], disk_files)
+ self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
+
+ def test_get_vm_disks(self):
+ mock_vm = self._lookup_vm()
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+
+ mock_rasds = self._create_mock_disks()
+ mock_vmsettings[0].associators.return_value = mock_rasds
+
+ (disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
+
+ mock_vm.associators.assert_called_with(
+ wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
+ mock_vmsettings[0].associators.assert_called_with(
+ wmi_result_class=self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS)
+ self.assertEqual([mock_rasds[0]], disks)
+ self.assertEqual([mock_rasds[1]], volumes)
+
+ def _create_mock_disks(self):
+ mock_rasd1 = mock.MagicMock()
+ mock_rasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
+ mock_rasd1.Connection = [self._FAKE_VHD_PATH]
+ mock_rasd1.Parent = self._FAKE_CTRL_PATH
+ mock_rasd1.Address = self._FAKE_ADDRESS
+ mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
+
+ mock_rasd2 = mock.MagicMock()
+ mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
+ mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
+
+ return [mock_rasd1, mock_rasd2]
+
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
+ @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
+ def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
+ None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ mock_vm = mock_get_wmi_obj.return_value
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+
+ mock_s = mock.MagicMock()
+ setattr(mock_s,
+ self._SETTING_TYPE,
+ self._VIRTUAL_SYSTEM_TYPE_REALIZED)
+ mock_vm.associators.return_value = [mock_s]
+
+ self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
+ self._FAKE_VCPUS_NUM, False,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
+ mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ mock_set_vcpus.assert_called_with(mock_vm, mock_s,
+ self._FAKE_VCPUS_NUM,
+ False)
+
+ def test_get_vm_scsi_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def test_get_vm_ide_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
+ self._FAKE_ADDRESS)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def _prepare_get_vm_controller(self, resource_sub_type):
+ mock_vm = self._lookup_vm()
+ mock_vm_settings = mock.MagicMock()
+ mock_rasds = mock.MagicMock()
+ mock_rasds.path_.return_value = self._FAKE_RES_PATH
+ mock_rasds.ResourceSubType = resource_sub_type
+ mock_rasds.Address = self._FAKE_ADDRESS
+ mock_vm_settings.associators.return_value = [mock_rasds]
+ mock_vm.associators.return_value = [mock_vm_settings]
+
+ def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
+ mock_rasds = mock_vm_settings.associators.return_value[0]
+ mock_rasds.path_.return_value = mock_path
+ mock_rasds.ResourceSubType = mock_subtype
+ return mock_rasds
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ @mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
+ def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ mock_rsd = mock_get_new_rsd.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
+ self._FAKE_CTRL_PATH,
+ self._FAKE_CTRL_ADDR,
+ self._FAKE_DRIVE_ADDR)
+
+ mock_add_virt_res.assert_called_with(mock_rsd,
+ mock_vm.path_.return_value)
+
+ mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
+ self.assertTrue(mock_get_new_rsd.called)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_create_scsi_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_attach_volume_to_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_volume_to_controller(
+ self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
+ @mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_nic_conn.return_value
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+
+ mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
+ def test_create_nic(self, mock_get_new_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_new_virt_res.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_nic(
+ self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
+
+ mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ def test_set_vm_state(self):
+ mock_vm = self._lookup_vm()
+ mock_vm.RequestStateChange.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.set_vm_state(self._FAKE_VM_NAME,
+ constants.HYPERV_VM_STATE_ENABLED)
+ mock_vm.RequestStateChange.assert_called_with(
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_destroy_vm(self):
+ self._lookup_vm()
+
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.destroy_vm(self._FAKE_VM_NAME)
+
+ getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
+ self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_wait_for_job')
+ def test_check_ret_val_ok(self, mock_wait_for_job):
+ self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
+ self._FAKE_JOB_PATH)
+ mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
+
+ def test_check_ret_val_exception(self):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils.check_ret_val,
+ self._FAKE_RET_VAL_BAD,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_done(self):
+ mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
+ job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
+ self.assertEqual(mockjob, job)
+
+ def test_wait_for_job_exception_concrete_job(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.path.return_value.Class = self._CONCRETE_JOB
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_with_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_no_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (None, None)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
+ mock_job = mock.MagicMock()
+ mock_job.JobState = state
+ mock_job.Description = self._FAKE_JOB_DESCRIPTION
+ mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
+
+ self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
+ return mock_job
+
+ def test_add_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._ADD_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._add_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_add_resources(mock_svc)
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyVirtualSystemResources.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyVirtualSystemResources.assert_called_with(
+ ResourceSettingData=[self._FAKE_RES_DATA],
+ ComputerSystem=self._FAKE_VM_PATH)
+
+ def test_remove_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
+
+ self._vmutils._remove_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_remove_resources(mock_svc)
+
+ def test_set_disk_host_resource(self):
+ self._lookup_vm()
+ mock_rasds = self._create_mock_disks()
+
+ self._vmutils._get_vm_disks = mock.MagicMock(
+ return_value=([mock_rasds[0]], [mock_rasds[1]]))
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+ self._vmutils._get_disk_resource_address = mock.MagicMock(
+ return_value=self._FAKE_ADDRESS)
+
+ self._vmutils.set_disk_host_resource(
+ self._FAKE_VM_NAME,
+ self._FAKE_CTRL_PATH,
+ self._FAKE_ADDRESS,
+ mock.sentinel.fake_new_mounted_disk_path)
+ self._vmutils._get_disk_resource_address.assert_called_with(
+ mock_rasds[0])
+ self._vmutils._modify_virt_resource.assert_called_with(
+ mock_rasds[0], self._FAKE_VM_PATH)
+ self.assertEqual(
+ mock.sentinel.fake_new_mounted_disk_path,
+ mock_rasds[0].HostResource[0])
+
+ @mock.patch.object(vmutils, 'wmi', create=True)
+ @mock.patch.object(vmutils.VMUtils, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateVirtualSystemSnapshot.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
+ self._FAKE_VM_PATH)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ def test_remove_vm_snapshot(self):
+ mock_svc = self._get_snapshot_service()
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
+ self._FAKE_SNAPSHOT_PATH)
+
+ def test_detach_vm_disk(self):
+ self._lookup_vm()
+ mock_disk = self._prepare_mock_disk()
+
+ with mock.patch.object(self._vmutils,
+ '_remove_virt_resource') as mock_rm_virt_res:
+ self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
+ self._FAKE_HOST_RESOURCE)
+
+ mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
+
+ def test_get_mounted_disk_resource_from_path(self):
+ mock_disk_1 = mock.MagicMock()
+ mock_disk_2 = mock.MagicMock()
+ mock_disk_2.HostResource = [self._FAKE_MOUNTED_DISK_PATH]
+ self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
+
+ physical_disk = self._vmutils._get_mounted_disk_resource_from_path(
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ self.assertEqual(mock_disk_2, physical_disk)
+
+ def test_get_controller_volume_paths(self):
+ self._prepare_mock_disk()
+ mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
+ disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
+ self.assertEqual(mock_disks, disks)
+
+ def _prepare_mock_disk(self):
+ mock_disk = mock.MagicMock()
+ mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
+ mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
+ mock_disk.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ self._vmutils._conn.query.return_value = [mock_disk]
+
+ return mock_disk
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ [self._FAKE_RES_DATA], self._FAKE_VM_PATH)
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH], self._FAKE_VM_PATH)
+
+ def test_get_active_instances(self):
+ fake_vm = mock.MagicMock()
+
+ type(fake_vm).ElementName = mock.PropertyMock(
+ side_effect=['active_vm', 'inactive_vm'])
+ type(fake_vm).EnabledState = mock.PropertyMock(
+ side_effect=[constants.HYPERV_VM_STATE_ENABLED,
+ constants.HYPERV_VM_STATE_DISABLED])
+ self._vmutils.list_instances = mock.MagicMock(
+ return_value=[mock.sentinel.fake_vm_name] * 2)
+ self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2)
+ active_instances = self._vmutils.get_active_instances()
+
+ self.assertEqual(['active_vm'], active_instances)
+
+ def _test_get_vm_serial_port_connection(self, new_connection=None):
+ old_serial_connection = 'old_serial_connection'
+
+ mock_vm = self._lookup_vm()
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+
+ fake_serial_port = mock.MagicMock()
+
+ fake_serial_port.ResourceSubType = (
+ self._vmutils._SERIAL_PORT_RES_SUB_TYPE)
+ fake_serial_port.Connection = [old_serial_connection]
+ mock_rasds = [fake_serial_port]
+ mock_vmsettings[0].associators.return_value = mock_rasds
+ self._vmutils._modify_virt_resource = mock.MagicMock()
+ fake_modify = self._vmutils._modify_virt_resource
+
+ ret_val = self._vmutils.get_vm_serial_port_connection(
+ self._FAKE_VM_NAME, update_connection=new_connection)
+
+ if new_connection:
+ self.assertEqual(new_connection, ret_val)
+ fake_modify.assert_called_once_with(fake_serial_port,
+ mock_vm.path_())
+ else:
+ self.assertEqual(old_serial_connection, ret_val)
+
+ def test_set_vm_serial_port_connection(self):
+ self._test_get_vm_serial_port_connection('new_serial_connection')
+
+ def test_get_vm_serial_port_connection(self):
+ self._test_get_vm_serial_port_connection()
+
+ def test_list_instance_notes(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name',
+ 'Notes': '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instance_notes()
+
+ self.assertEqual([(attrs['ElementName'], [attrs['Notes']])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName', 'Notes'],
+ SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
+ def test_modify_virtual_system(self, mock_check_ret_val):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vmsetting = mock.MagicMock()
+ fake_path = 'fake path'
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+
+ mock_vs_man_svc.ModifyVirtualSystem.return_value = (0, fake_job_path,
+ fake_ret_val)
+
+ self._vmutils._modify_virtual_system(vs_man_svc=mock_vs_man_svc,
+ vm_path=fake_path,
+ vmsetting=mock_vmsetting)
+
+ mock_vs_man_svc.ModifyVirtualSystem.assert_called_once_with(
+ ComputerSystem=fake_path,
+ SystemSettingData=mock_vmsetting.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_wmi_obj')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._modify_virtual_system')
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_setting_data')
+ def test_create_vm_obj(self, mock_get_vm_setting_data,
+ mock_modify_virtual_system,
+ mock_get_wmi_obj, mock_check_ret_val):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vs_gs_data = mock.MagicMock()
+ fake_vm_path = 'fake vm path'
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+ _conn = self._vmutils._conn.Msvm_VirtualSystemGlobalSettingData
+
+ _conn.new.return_value = mock_vs_gs_data
+ mock_vs_man_svc.DefineVirtualSystem.return_value = (fake_vm_path,
+ fake_job_path,
+ fake_ret_val)
+
+ response = self._vmutils._create_vm_obj(vs_man_svc=mock_vs_man_svc,
+ vm_name='fake vm',
+ notes='fake notes',
+ dynamic_memory_ratio=1.0)
+
+ _conn.new.assert_called_once_with()
+ self.assertEqual(mock_vs_gs_data.ElementName, 'fake vm')
+ mock_vs_man_svc.DefineVirtualSystem.assert_called_once_with(
+ [], None, mock_vs_gs_data.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ mock_get_wmi_obj.assert_called_with(fake_vm_path)
+ mock_get_vm_setting_data.assert_called_once_with(mock_get_wmi_obj())
+ mock_modify_virtual_system.assert_called_once_with(
+ mock_vs_man_svc, fake_vm_path, mock_get_vm_setting_data())
+
+ self.assertEqual(mock_get_vm_setting_data().Notes,
+ '\n'.join('fake notes'))
+ self.assertEqual(response, mock_get_wmi_obj())
+
+ def test_list_instances(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instances()
+
+ self.assertEqual([(attrs['ElementName'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName'],
+ SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
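
One detail worth noting in the class above: the version-specific WMI method names (DefineVirtualSystem, DestroyVirtualSystem, AddVirtualSystemResources, ...) live in class attributes and are always reached through getattr(mock_svc, self._DEFINE_SYSTEM) rather than literal attribute access. That indirection is what lets the V2 test case in the next file reuse these test bodies unchanged, overriding only the constants. A minimal illustration of the pattern (class and method names invented for this example):

import mock


class ServiceV1Checks(object):
    _DEFINE_SYSTEM = 'DefineVirtualSystem'      # v1 WMI method name

    def check_define_called(self, mock_svc):
        # Resolved at runtime, so subclasses can swap the method name.
        getattr(mock_svc, self._DEFINE_SYSTEM).assert_called_once_with()


class ServiceV2Checks(ServiceV1Checks):
    _DEFINE_SYSTEM = 'DefineSystem'             # v2 WMI method name


svc = mock.MagicMock()
svc.DefineSystem()
ServiceV2Checks().check_define_called(svc)      # passes against the v2 name
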
diff --git a/nova/tests/unit/virt/hyperv/test_vmutilsv2.py b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
new file mode 100644
index 0000000000..e4c24683eb
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
@@ -0,0 +1,197 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_vmutils
+from nova.virt.hyperv import vmutilsv2
+
+
+class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
+ """Unit tests for the Hyper-V VMUtilsV2 class."""
+
+ _DEFINE_SYSTEM = 'DefineSystem'
+ _DESTROY_SYSTEM = 'DestroySystem'
+ _DESTROY_SNAPSHOT = 'DestroySnapshot'
+
+ _ADD_RESOURCE = 'AddResourceSettings'
+ _REMOVE_RESOURCE = 'RemoveResourceSettings'
+ _SETTING_TYPE = 'VirtualSystemType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
+
+ def setUp(self):
+ super(VMUtilsV2TestCase, self).setUp()
+ self._vmutils = vmutilsv2.VMUtilsV2()
+ self._vmutils._conn = mock.MagicMock()
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyResourceSettings.assert_called_with(
+ ResourceSettings=[self._FAKE_RES_DATA])
+
+ @mock.patch.object(vmutilsv2, 'wmi', create=True)
+ @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateSnapshot.assert_called_with(
+ AffectedSystem=self._FAKE_VM_PATH,
+ SnapshotType=self._vmutils._SNAPSHOT_FULL)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
+ mock_add_virt_res):
+ self._lookup_vm()
+ fake_eth_port = mock_get_new_sd.return_value
+
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+ mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
+
+ metric_def = mock.MagicMock()
+ mock_disk = mock.MagicMock()
+ mock_disk.path_.return_value = self._FAKE_RES_PATH
+ mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
+
+ fake_metric_def_paths = ["fake_0", None]
+ fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH]
+
+ metric_def.path_.side_effect = fake_metric_def_paths
+ self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
+ metric_def]
+
+ self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
+
+ calls = []
+ for i in range(len(fake_metric_def_paths)):
+ calls.append(mock.call(
+ Subject=fake_metric_resource_paths[i],
+ Definition=fake_metric_def_paths[i],
+ MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
+
+ mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH])
+
+ def test_list_instance_notes(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name',
+ 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instance_notes()
+
+ self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName', 'Notes'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
+
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
+ def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
+ vm_path, dynamic_memory_ratio=1.0):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vs_data = mock.MagicMock()
+ mock_job = mock.MagicMock()
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+ _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
+
+ mock_check_ret_val.return_value = mock_job
+ _conn.new.return_value = mock_vs_data
+ mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
+ vm_path,
+ fake_ret_val)
+ mock_job.associators.return_value = ['fake vm path']
+
+ response = self._vmutils._create_vm_obj(
+ vs_man_svc=mock_vs_man_svc,
+ vm_name='fake vm',
+ notes='fake notes',
+ dynamic_memory_ratio=dynamic_memory_ratio)
+
+ if not vm_path:
+ mock_job.associators.assert_called_once_with(
+ self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
+
+ _conn.new.assert_called_once_with()
+ self.assertEqual(mock_vs_data.ElementName, 'fake vm')
+ mock_vs_man_svc.DefineSystem.assert_called_once_with(
+ ResourceSettings=[], ReferenceConfiguration=None,
+ SystemSettings=mock_vs_data.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ if dynamic_memory_ratio > 1:
+ self.assertFalse(mock_vs_data.VirtualNumaEnabled)
+
+ mock_get_wmi_obj.assert_called_with('fake vm path')
+
+ self.assertEqual(mock_vs_data.Notes, 'fake notes')
+ self.assertEqual(response, mock_get_wmi_obj())
+
+ def test_create_vm_obj(self):
+ self._test_create_vm_obj(vm_path='fake vm path')
+
+ def test_create_vm_obj_no_vm_path(self):
+ self._test_create_vm_obj(vm_path=None)
+
+ def test_create_vm_obj_dynamic_memory(self):
+ self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
+
+ def test_list_instances(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instances()
+
+ self.assertEqual([(attrs['ElementName'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
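
The metrics test above builds an explicit list of mock.call objects and verifies it with assert_has_calls(calls, any_order=True), which only requires that each expected call occurred somewhere, not that the calls came in that order or were the only ones. A small self-contained example of the same assertion style:

import mock

m = mock.MagicMock()
m.ControlMetrics(Subject='vm_path', Definition='def_0')
m.ControlMetrics(Subject='res_path', Definition=None)

expected = [mock.call(Subject='res_path', Definition=None),
            mock.call(Subject='vm_path', Definition='def_0')]
# Passes even though the expected list is in a different order.
m.ControlMetrics.assert_has_calls(expected, any_order=True)
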
diff --git a/nova/tests/unit/virt/hyperv/test_volumeutils.py b/nova/tests/unit/virt/hyperv/test_volumeutils.py
new file mode 100644
index 0000000000..98ffcce533
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_volumeutils.py
@@ -0,0 +1,151 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.tests.unit.virt.hyperv import test_basevolumeutils
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+
+CONF = cfg.CONF
+CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
+ 'hyperv')
+
+
+class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
+ """Unit tests for the Hyper-V VolumeUtils class."""
+
+ _FAKE_PORTAL_ADDR = '10.1.1.1'
+ _FAKE_PORTAL_PORT = '3260'
+ _FAKE_LUN = 0
+ _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+
+ _FAKE_STDOUT_VALUE = 'The operation completed successfully'
+
+ def setUp(self):
+ super(VolumeUtilsTestCase, self).setUp()
+ self._volutils = volumeutils.VolumeUtils()
+ self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
+ self.flags(volume_attach_retry_count=4, group='hyperv')
+ self.flags(volume_attach_retry_interval=0, group='hyperv')
+
+ def _test_login_target_portal(self, portal_connected):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+
+ self._volutils.execute = mock.MagicMock()
+ if portal_connected:
+ exec_output = 'Address and Socket: %s %s' % (
+ self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
+ else:
+ exec_output = ''
+
+ self._volutils.execute.return_value = exec_output
+
+ self._volutils._login_target_portal(fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if portal_connected:
+ self.assertIn('RefreshTargetPortal', all_call_args)
+ else:
+ self.assertIn('AddTargetPortal', all_call_args)
+
+ def test_login_connected_portal(self):
+ self._test_login_target_portal(True)
+
+ def test_login_new_portal(self):
+ self._test_login_target_portal(False)
+
+ def _test_login_target(self, target_connected, raise_exception=False):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+ self._volutils.execute = mock.MagicMock()
+ self._volutils._login_target_portal = mock.MagicMock()
+
+ if target_connected:
+ self._volutils.execute.return_value = self._FAKE_TARGET
+ elif raise_exception:
+ self._volutils.execute.return_value = ''
+ else:
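+            # Simulate the target becoming visible only on a later
+            # execute() call, so the login path is expected to retry
+            # before succeeding.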
+ self._volutils.execute.side_effect = (
+ ['', '', '', self._FAKE_TARGET, ''])
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.login_storage_target,
+ self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
+ else:
+ self._volutils.login_storage_target(self._FAKE_LUN,
+ self._FAKE_TARGET,
+ fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if target_connected:
+ self.assertNotIn('qlogintarget', all_call_args)
+ else:
+ self.assertIn('qlogintarget', all_call_args)
+
+ def test_login_connected_target(self):
+ self._test_login_target(True)
+
+    def test_login_disconnected_target(self):
+ self._test_login_target(False)
+
+ def test_login_target_exception(self):
+ self._test_login_target(False, True)
+
+ def _test_execute_wrapper(self, raise_exception):
+ fake_cmd = ('iscsicli.exe', 'ListTargetPortals')
+
+ if raise_exception:
+ output = 'fake error'
+ else:
+ output = 'The operation completed successfully'
+
+ with mock.patch('nova.utils.execute') as fake_execute:
+ fake_execute.return_value = (output, None)
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.execute,
+ *fake_cmd)
+ else:
+ ret_val = self._volutils.execute(*fake_cmd)
+ self.assertEqual(output, ret_val)
+
+ def test_execute_raise_exception(self):
+ self._test_execute_wrapper(True)
+
+    def test_execute_no_exception(self):
+        self._test_execute_wrapper(False)
+
+ @mock.patch.object(volumeutils, 'utils')
+ def test_logout_storage_target(self, mock_utils):
+ mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
+ mock.sentinel.FAKE_STDERR_VALUE)
+ session = mock.MagicMock()
+ session.SessionId = mock.sentinel.FAKE_SESSION_ID
+ self._volutils._conn_wmi.query.return_value = [session]
+
+ self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
+ mock_utils.execute.assert_called_once_with(
+ 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
new file mode 100644
index 0000000000..1c242b71f8
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
@@ -0,0 +1,147 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutilsv2
+
+CONF = cfg.CONF
+CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
+ 'hyperv')
+
+
+class VolumeUtilsV2TestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VolumeUtilsV2 class."""
+
+ _FAKE_PORTAL_ADDR = '10.1.1.1'
+ _FAKE_PORTAL_PORT = '3260'
+ _FAKE_LUN = 0
+ _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+
+ def setUp(self):
+ super(VolumeUtilsV2TestCase, self).setUp()
+ self._volutilsv2 = volumeutilsv2.VolumeUtilsV2()
+ self._volutilsv2._conn_storage = mock.MagicMock()
+ self._volutilsv2._conn_wmi = mock.MagicMock()
+ self.flags(volume_attach_retry_count=4, group='hyperv')
+ self.flags(volume_attach_retry_interval=0, group='hyperv')
+
+ def _test_login_target_portal(self, portal_connected):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+ fake_portal_object = mock.MagicMock()
+ _query = self._volutilsv2._conn_storage.query
+ self._volutilsv2._conn_storage.MSFT_iSCSITargetPortal = (
+ fake_portal_object)
+
+ if portal_connected:
+ _query.return_value = [fake_portal_object]
+ else:
+ _query.return_value = None
+
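+        # An already known portal is expected to be refreshed via Update();
+        # a missing one is created via New() with the parsed address/port.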
+ self._volutilsv2._login_target_portal(fake_portal)
+
+ if portal_connected:
+ fake_portal_object.Update.assert_called_once_with()
+ else:
+ fake_portal_object.New.assert_called_once_with(
+ TargetPortalAddress=self._FAKE_PORTAL_ADDR,
+ TargetPortalPortNumber=self._FAKE_PORTAL_PORT)
+
+ def test_login_connected_portal(self):
+ self._test_login_target_portal(True)
+
+ def test_login_new_portal(self):
+ self._test_login_target_portal(False)
+
+ def _test_login_target(self, target_connected, raise_exception=False):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+
+ fake_target_object = mock.MagicMock()
+
+ if target_connected:
+ fake_target_object.IsConnected = True
+ elif not raise_exception:
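+            # IsConnected reports False on the first check and True on the
+            # next one, simulating a target that connects after a retry.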
+ type(fake_target_object).IsConnected = mock.PropertyMock(
+ side_effect=[False, True])
+ else:
+ fake_target_object.IsConnected = False
+
+ _query = self._volutilsv2._conn_storage.query
+ _query.return_value = [fake_target_object]
+
+ self._volutilsv2._conn_storage.MSFT_iSCSITarget = (
+ fake_target_object)
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutilsv2.login_storage_target,
+ self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
+ else:
+ self._volutilsv2.login_storage_target(self._FAKE_LUN,
+ self._FAKE_TARGET,
+ fake_portal)
+
+ if target_connected:
+ fake_target_object.Update.assert_called_with()
+ else:
+ fake_target_object.Connect.assert_called_once_with(
+ IsPersistent=True, NodeAddress=self._FAKE_TARGET)
+
+ def test_login_connected_target(self):
+ self._test_login_target(True)
+
+    def test_login_disconnected_target(self):
+ self._test_login_target(False)
+
+ def test_login_target_exception(self):
+ self._test_login_target(False, True)
+
+ def test_logout_storage_target(self):
+ mock_msft_target = self._volutilsv2._conn_storage.MSFT_iSCSITarget
+ mock_msft_session = self._volutilsv2._conn_storage.MSFT_iSCSISession
+
+ mock_target = mock.MagicMock()
+ mock_target.IsConnected = True
+ mock_msft_target.return_value = [mock_target]
+
+ mock_session = mock.MagicMock()
+ mock_session.IsPersistent = True
+ mock_msft_session.return_value = [mock_session]
+
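+        # Both the persistent session and the connected target are expected
+        # to be cleaned up (Unregister / Disconnect asserted below).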
+ self._volutilsv2.logout_storage_target(self._FAKE_TARGET)
+
+ mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET)
+ mock_msft_session.assert_called_once_with(
+ TargetNodeAddress=self._FAKE_TARGET)
+
+ mock_session.Unregister.assert_called_once_with()
+ mock_target.Disconnect.assert_called_once_with()
+
+ @mock.patch.object(volumeutilsv2.VolumeUtilsV2, 'logout_storage_target')
+ def test_execute_log_out(self, mock_logout_target):
+ sess_class = self._volutilsv2._conn_wmi.MSiSCSIInitiator_SessionClass
+
+ mock_session = mock.MagicMock()
+ sess_class.return_value = [mock_session]
+
+ self._volutilsv2.execute_log_out(mock.sentinel.FAKE_SESSION_ID)
+
+ sess_class.assert_called_once_with(
+ SessionId=mock.sentinel.FAKE_SESSION_ID)
+ mock_logout_target.assert_called_once_with(mock_session.TargetName)
diff --git a/nova/tests/unit/virt/ironic/__init__.py b/nova/tests/unit/virt/ironic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/__init__.py
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
new file mode 100644
index 0000000000..025d2616dd
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -0,0 +1,126 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironicclient import client as ironic_client
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import client_wrapper
+
+CONF = cfg.CONF
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class IronicClientWrapperTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicClientWrapperTestCase, self).setUp()
+ self.ironicclient = client_wrapper.IronicClientWrapper()
+ # Do not waste time sleeping
+ cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list")
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with()
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list", 'test', associated=True)
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with(
+ 'test', associated=True)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_no_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token=None, group='ironic')
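+        # Without an auth token the client is expected to be built from the
+        # individual admin_* credentials and the configured API endpoint.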
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_with_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token='fake-token', group='ironic')
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_auth_token': 'fake-token',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail(self, mock_get_client, mock_multi_getattr):
+ cfg.CONF.set_override('api_max_retries', 2, 'ironic')
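+        # With api_max_retries=2 the failing call should be attempted twice
+        # before the wrapper gives up and raises NovaException.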
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(exception.NovaException, self.ironicclient.call,
+ "node.list")
+ self.assertEqual(2, test_obj.call_count)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail_unexpected_exception(self, mock_get_client,
+ mock_multi_getattr):
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPNotFound
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(ironic_exception.HTTPNotFound,
+ self.ironicclient.call, "node.list")
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unauthorized(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.Unauthorized
+ self.assertRaises(exception.NovaException,
+ self.ironicclient._get_client)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unexpected_exception(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.ConnectionRefused
+ self.assertRaises(ironic_exception.ConnectionRefused,
+ self.ironicclient._get_client)
+
+ def test__multi_getattr_good(self):
+ response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
+ self.assertEqual(FAKE_CLIENT.node.list, response)
+
+ def test__multi_getattr_fail(self):
+ self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
+ FAKE_CLIENT, "nonexistent")
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
new file mode 100644
index 0000000000..0e24c7bab4
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -0,0 +1,1268 @@
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the ironic driver."""
+
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import power_state as nova_states
+from nova.compute import task_states
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import utils
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import firewall
+from nova.virt.ironic import client_wrapper as cw
+from nova.virt.ironic import driver as ironic_driver
+from nova.virt.ironic import ironic_states
+
+
+CONF = cfg.CONF
+
+IRONIC_FLAGS = dict(
+ api_version=1,
+ group='ironic',
+)
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class FakeClientWrapper(cw.IronicClientWrapper):
+ def _get_client(self):
+ return FAKE_CLIENT
+
+
+class FakeLoopingCall(object):
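+    """Minimal stand-in for a FixedIntervalLoopingCall.
+
+    start() returns the fake itself so tests can assert that start() and
+    wait() were invoked without running a real looping call.
+    """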
+ def __init__(self):
+ self.wait = mock.MagicMock()
+ self.start = mock.MagicMock()
+ self.start.return_value = self
+
+
+def _get_properties():
+ return {'cpus': 2,
+ 'memory_mb': 512,
+ 'local_gb': 10,
+ 'cpu_arch': 'x86_64'}
+
+
+def _get_stats():
+ return {'cpu_arch': 'x86_64'}
+
+
+FAKE_CLIENT_WRAPPER = FakeClientWrapper()
+
+
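+# The class-level patch below substitutes the shared FAKE_CLIENT_WRAPPER for
+# every IronicClientWrapper the driver under test would otherwise construct.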
+@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
+class IronicDriverTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverTestCase, self).setUp()
+ self.flags(**IRONIC_FLAGS)
+ self.driver = ironic_driver.IronicDriver(None)
+ self.driver.virtapi = fake.FakeVirtAPI()
+ self.ctx = nova_context.get_admin_context()
+
+        # Mock the retry config options to avoid sleeps and speed up tests
+ CONF.set_default('api_max_retries', default=1, group='ironic')
+ CONF.set_default('api_retry_interval', default=0, group='ironic')
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
+
+ def test_validate_driver_loading(self):
+ self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
+
+ def test__get_hypervisor_type(self):
+ self.assertEqual('ironic', self.driver._get_hypervisor_type())
+
+ def test__get_hypervisor_version(self):
+ self.assertEqual(1, self.driver._get_hypervisor_version())
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node(self, mock_gbiui):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ ironicclient = cw.IronicClientWrapper()
+
+ mock_gbiui.return_value = node
+ result = ironic_driver._validate_instance_and_node(ironicclient,
+ instance)
+ self.assertEqual(result.uuid, node_uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node_failed(self, mock_gbiui):
+ ironicclient = cw.IronicClientWrapper()
+ mock_gbiui.side_effect = ironic_exception.NotFound()
+        instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertRaises(exception.InstanceNotFound,
+ ironic_driver._validate_instance_and_node,
+ ironicclient, instance)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYING)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_active(FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_done(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.ACTIVE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_fail(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYFAIL)
+
+ fake_validate.return_value = node
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.POWER_OFF)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_power_state(
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_ok(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.NOSTATE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_power_state,
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ def test__node_resource(self):
+ node_uuid = uuidutils.generate_uuid()
+ instance_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(props['cpus'], result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(props['memory_mb'], result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(props['local_gb'], result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ def test__node_resource_canonicalizes_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ props['cpu_arch'] = 'i386'
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual('i686',
+ jsonutils.loads(result['supported_instances'])[0][0])
+ self.assertEqual('i386',
+ jsonutils.loads(result['stats'])['cpu_arch'])
+
+ def test__node_resource_unknown_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ del props['cpu_arch']
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual([], jsonutils.loads(result['supported_instances']))
+
+ def test__node_resource_exposes_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertIsNone(stats.get('capabilities'))
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = None
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
+
+ def test__node_resource_malformed_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability,:no_key,no_val:'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_instance_uuid(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ power_state=ironic_states.POWER_OFF,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable')
+ def test__node_resource_unavailable_node_res(self, mock_res_unavail):
+ mock_res_unavail.return_value = True
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(0, result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(0, result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(0, result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
+ create=True)
+ def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._start_firewall(fake_inst, fake_net_info)
+
+ mock_aif.assert_called_once_with(fake_inst, fake_net_info)
+ mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
+ mock_pif.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
+ create=True)
+ def test__stop_firewall(self, mock_ui):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._stop_firewall(fake_inst, fake_net_info)
+ mock_ui.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists(self, mock_call):
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertTrue(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists_fail(self, mock_call):
+ mock_call.side_effect = ironic_exception.NotFound
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertFalse(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ @mock.patch.object(objects.Instance, 'get_by_uuid')
+ def test_list_instances(self, mock_inst_by_uuid, mock_call):
+ nodes = []
+ instances = []
+ for i in range(2):
+ uuid = uuidutils.generate_uuid()
+ instances.append(fake_instance.fake_instance_obj(self.ctx,
+ id=i,
+ uuid=uuid))
+ nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
+
+ mock_inst_by_uuid.side_effect = instances
+ mock_call.return_value = nodes
+
+ response = self.driver.list_instances()
+ mock_call.assert_called_with("node.list", associated=True, limit=0)
+ expected_calls = [mock.call(mock.ANY, instances[0].uuid),
+ mock.call(mock.ANY, instances[1].uuid)]
+ mock_inst_by_uuid.assert_has_calls(expected_calls)
+ self.assertEqual(['instance-00000000', 'instance-00000001'],
+ sorted(response))
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_list_instance_uuids(self, mock_call):
+ num_nodes = 2
+ nodes = []
+ for n in range(num_nodes):
+ nodes.append(ironic_utils.get_test_node(
+ instance_uuid=uuidutils.generate_uuid()))
+
+ mock_call.return_value = nodes
+ uuids = self.driver.list_instance_uuids()
+ mock_call.assert_called_with('node.list', associated=True, limit=0)
+ expected = [n.instance_uuid for n in nodes]
+ self.assertEqual(sorted(expected), sorted(uuids))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache_empty_list(self, mock_get,
+ mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = []
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_get.assert_called_with(node.uuid)
+ mock_list.assert_called_with(detail=True, limit=0)
+
+ mock_get.side_effect = ironic_exception.NotFound
+ self.assertFalse(self.driver.node_is_available(node.uuid))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_list.assert_called_with(detail=True, limit=0)
+ self.assertEqual(0, mock_get.call_count)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_with_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ # prove that zero calls are made after populating cache
+ mock_list.reset_mock()
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+
+ def test__node_resources_unavailable(self):
+ node_dicts = [
+            # a node in maintenance with no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+            # a node in maintenance with no instance and ERROR power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.ERROR},
+            # a node not in maintenance with no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.NOSTATE},
+ ]
+ for n in node_dicts:
+ node = ironic_utils.get_test_node(**n)
+ self.assertTrue(self.driver._node_resources_unavailable(node))
+
+ avail_node = ironic_utils.get_test_node(
+ power_state=ironic_states.POWER_OFF)
+ self.assertFalse(self.driver._node_resources_unavailable(avail_node))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ def test_get_available_nodes(self, mock_list):
+ node_dicts = [
+            # a node in maintenance with no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+            # a node with an instance and power ON
+ {'uuid': uuidutils.generate_uuid(),
+ 'instance_uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.POWER_ON},
+            # a node not in maintenance with no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.ERROR},
+ ]
+ nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
+ mock_list.return_value = nodes
+ available_nodes = self.driver.get_available_nodes()
+ expected_uuids = [n['uuid'] for n in node_dicts]
+ self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource(self, mock_nr, mock_list, mock_get):
+ node = ironic_utils.get_test_node()
+ node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
+ fake_resource = 'fake-resource'
+ mock_get.return_value = node
+ # ensure cache gets populated without the node we want
+ mock_list.return_value = [node_2]
+ mock_nr.return_value = fake_resource
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ mock_nr.assert_called_once_with(node)
+ mock_get.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource_with_cache(self, mock_nr, mock_list,
+ mock_get):
+ node = ironic_utils.get_test_node()
+ fake_resource = 'fake-resource'
+ mock_list.return_value = [node]
+ mock_nr.return_value = fake_resource
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ mock_list.reset_mock()
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+ mock_nr.assert_called_once_with(node)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info(self, mock_gbiu):
+ instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ properties = {'memory_mb': 512, 'cpus': 2}
+ power_state = ironic_states.POWER_ON
+ node = ironic_utils.get_test_node(instance_uuid=instance_uuid,
+ properties=properties,
+ power_state=power_state)
+
+ mock_gbiu.return_value = node
+
+ # ironic_states.POWER_ON should be mapped to
+ # nova_states.RUNNING
+ memory_kib = properties['memory_mb'] * 1024
+ expected = {'state': nova_states.RUNNING,
+ 'max_mem': memory_kib,
+ 'mem': memory_kib,
+ 'num_cpu': properties['cpus'],
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj('fake-context',
+ uuid=instance_uuid)
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info_http_not_found(self, mock_gbiu):
+ mock_gbiu.side_effect = ironic_exception.NotFound()
+
+ expected = {'state': nova_states.NOSTATE,
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 0,
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=uuidutils.generate_uuid())
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_macs_for_instance(self, mock_node):
+ node = ironic_utils.get_test_node()
+ port = ironic_utils.get_test_port()
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ result = self.driver.macs_for_instance(instance)
+ self.assertEqual(set([port.address]), result)
+ mock_node.list_ports.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_macs_for_instance_http_not_found(self, mock_get):
+ mock_get.side_effect = ironic_exception.NotFound()
+
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=uuidutils.generate_uuid())
+ result = self.driver.macs_for_instance(instance)
+ self.assertIsNone(result)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ def test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
+ mock_fg_bid, mock_node, mock_looping, mock_save):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = {'ephemeral_gb': 0}
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.return_value = mock.MagicMock()
+ mock_fg_bid.return_value = fake_flavor
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ self.driver.spawn(self.ctx, instance, None, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_fg_bid.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
+ mock_pvifs.assert_called_once_with(node, instance, None)
+ mock_sf.assert_called_once_with(instance, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'active')
+
+ self.assertIsNone(instance['default_ephemeral_device'])
+ self.assertFalse(mock_save.called)
+
+ mock_looping.assert_called_once_with(mock_wait_active,
+ FAKE_CLIENT_WRAPPER,
+ instance)
+ fake_looping_call.start.assert_called_once_with(
+ interval=CONF.ironic.api_retry_interval)
+ fake_looping_call.wait.assert_called_once_with()
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
+ mock_wait_active, mock_destroy,
+ mock_fg_bid, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = {'ephemeral_gb': 0}
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.return_value = mock.MagicMock()
+ mock_fg_bid.return_value = fake_flavor
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ deploy_exc = exception.InstanceDeployFailure('foo')
+ fake_looping_call.wait.side_effect = deploy_exc
+ self.assertRaises(
+ exception.InstanceDeployFailure,
+ self.driver.spawn, self.ctx, instance, None, [], None)
+ mock_destroy.assert_called_once_with(self.ctx, instance, None)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__add_driver_fields_good(self, mock_update):
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor = ironic_utils.get_test_flavor()
+ self.driver._add_driver_fields(node, instance, image_meta, flavor)
+ expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
+ 'value': image_meta['id']},
+ {'path': '/instance_info/root_gb', 'op': 'add',
+ 'value': str(instance.root_gb)},
+ {'path': '/instance_info/swap_mb', 'op': 'add',
+ 'value': str(flavor['swap'])},
+ {'path': '/instance_uuid', 'op': 'add',
+ 'value': instance.uuid}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__add_driver_fields_fail(self, mock_update):
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor = ironic_utils.get_test_flavor()
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._add_driver_fields,
+ node, instance, image_meta, flavor)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_good_with_flavor(self, mock_update):
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ flavor = ironic_utils.get_test_flavor(extra_specs={})
+ self.driver._cleanup_deploy(self.ctx, node, instance, None,
+ flavor=flavor)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_without_flavor(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.driver._cleanup_deploy(self.ctx, node, instance, None)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_fail(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.assertRaises(exception.InstanceTerminationFailure,
+ self.driver._cleanup_deploy,
+ self.ctx, node, instance, None)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_spawn_node_driver_validation_fail(self, mock_flavor, mock_node):
+ mock_flavor.return_value = ironic_utils.get_test_flavor()
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_node.validate.return_value = ironic_utils.get_test_validation(
+ power=False, deploy=False)
+ mock_node.get.return_value = node
+ image_meta = ironic_utils.get_test_image_meta()
+
+ self.assertRaises(exception.ValidationError, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_with(mock.ANY, instance['instance_type_id'])
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ class TestException(Exception):
+ pass
+
+ mock_sf.side_effect = TestException()
+ self.assertRaises(TestException, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ mock_node.set_provision_state.side_effect = exception.NovaException()
+ self.assertRaises(exception.NovaException, self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
+ instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ flavor = ironic_utils.get_test_flavor()
+ mock_flavor.return_value = flavor
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+ mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn,
+ self.ctx, instance, image_meta, [], None)
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.validate.assert_called_once_with(node_uuid)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
+ instance, None,
+ flavor=flavor)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ fake_net_info = utils.get_test_network_info()
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_flavor.return_value = ironic_utils.get_test_flavor()
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ fake_looping_call.wait.side_effect = ironic_exception.BadRequest
+ fake_net_info = utils.get_test_network_info()
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn, self.ctx, instance,
+ image_meta, [], None, fake_net_info)
+ mock_destroy.assert_called_once_with(self.ctx, instance,
+ fake_net_info)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
+ mock_wait, mock_flavor,
+ mock_node, mock_save,
+ mock_looping):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(ephemeral_gb=1)
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.return_value = mock.MagicMock()
+ image_meta = ironic_utils.get_test_image_meta()
+
+ self.driver.spawn(self.ctx, instance, image_meta, [], None)
+ mock_flavor.assert_called_once_with(self.ctx,
+ instance['instance_type_id'])
+ self.assertTrue(mock_save.called)
+ self.assertEqual('/dev/sda1', instance['default_ephemeral_device'])
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_destroy(self, mock_cleanup_deploy, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ network_info = 'foo'
+
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ def fake_set_provision_state(*_):
+ node.provision_state = None
+
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.set_provision_state.side_effect = fake_set_provision_state
+ self.driver.destroy(self.ctx, instance, network_info, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'deleted')
+ mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+ mock_cleanup_deploy.assert_called_with(self.ctx, node,
+ instance, network_info)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
+ def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
+ mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ network_info = 'foo'
+
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.DELETING)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_node.get_by_instance_uuid.return_value = node
+ self.driver.destroy(self.ctx, instance, network_info, None)
+ self.assertFalse(mock_node.set_provision_state.called)
+ mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+ mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
+ network_info)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ fake_validate.return_value = node
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ mock_sps.side_effect = exception.NovaException()
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_destroy_unprovision_fail(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ def fake_set_provision_state(*_):
+ node.provision_state = ironic_states.ERROR
+
+        mock_node.get_by_instance_uuid.return_value = node
+        mock_node.set_provision_state.side_effect = fake_set_provision_state
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'deleted')
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_destroy_unassociate_fail(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ mock_node.get_by_instance_uuid.return_value = node
+ mock_node.update.side_effect = exception.NovaException()
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'deleted')
+ mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_reboot(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.driver.reboot(self.ctx, instance, None, None)
+ mock_sp.assert_called_once_with(node.uuid, 'reboot')
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_power_off(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=instance_uuid)
+
+ self.driver.power_off(instance)
+ mock_sp.assert_called_once_with(node.uuid, 'off')
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_power_on(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=instance_uuid)
+
+ self.driver.power_on(self.ctx, instance,
+ utils.get_test_network_info())
+ mock_sp.assert_called_once_with(node.uuid, 'on')
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
+ def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port()
+
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = utils.get_test_network_info()
+
+ port_id = unicode(network_info[0]['id'])
+ expected_patch = [{'op': 'add',
+ 'path': '/extra/vif_port_id',
+ 'value': port_id}]
+ self.driver._plug_vifs(node, instance, network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(node, instance, network_info)
+ mock_lp.assert_called_once_with(node_uuid)
+ mock_port_udt.assert_called_with(port.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ def test_plug_vifs(self, mock__plug_vifs, mock_get):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+
+ mock_get.return_value = node
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = utils.get_test_network_info()
+ self.driver.plug_vifs(instance, network_info)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock__plug_vifs.assert_called_once_with(node, instance, network_info)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
+ @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
+ def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
+ mock_port_udt):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port()
+
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ # len(network_info) > len(ports)
+ network_info = (utils.get_test_network_info() +
+ utils.get_test_network_info())
+ self.assertRaises(exception.NovaException,
+ self.driver._plug_vifs, node, instance,
+ network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(node, instance, network_info)
+ mock_lp.assert_called_once_with(node_uuid)
+ # assert port.update() was not called
+ self.assertFalse(mock_port_udt.called)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
+ @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
+ def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
+ mock_port_udt):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port()
+
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = []
+ self.driver._plug_vifs(node, instance, network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(node, instance, network_info)
+ mock_lp.assert_called_once_with(node_uuid)
+ # assert port.update() was not called
+ self.assertFalse(mock_port_udt.called)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_unplug_vifs(self, mock_node, mock_update):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
+
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ expected_patch = [{'op': 'remove', 'path':
+ '/extra/vif_port_id'}]
+ self.driver.unplug_vifs(instance,
+ utils.get_test_network_info())
+
+ # asserts
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
+ mock_update.assert_called_once_with(port.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port(extra={})
+
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ self.driver.unplug_vifs(instance, utils.get_test_network_info())
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
+ # assert port.update() was not called
+ self.assertFalse(mock_update.called)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ def test_unplug_vifs_no_network_info(self, mock_update):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = []
+ self.driver.unplug_vifs(instance, network_info)
+
+ # assert port.update() was not called
+ self.assertFalse(mock_update.called)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
+ create=True)
+ def test_unfilter_instance(self, mock_ui):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = utils.get_test_network_info()
+ self.driver.unfilter_instance(instance, network_info)
+ mock_ui.assert_called_once_with(instance, network_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
+ create=True)
+ def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = utils.get_test_network_info()
+ self.driver.ensure_filtering_rules_for_instance(instance,
+ network_info)
+ mock_sbf.assert_called_once_with(instance, network_info)
+ mock_pif.assert_called_once_with(instance, network_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_instance_security_rules', create=True)
+ def test_refresh_instance_security_rules(self, mock_risr):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ self.driver.refresh_instance_security_rules(instance)
+ mock_risr.assert_called_once_with(instance)
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_provider_fw_rules', create=True)
+ def test_refresh_provider_fw_rules(self, mock_rpfr):
+ fake_instance.fake_instance_obj(self.ctx)
+ self.driver.refresh_provider_fw_rules()
+ mock_rpfr.assert_called_once_with()
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_security_group_members', create=True)
+ def test_refresh_security_group_members(self, mock_rsgm):
+ fake_group = 'fake-security-group-members'
+ self.driver.refresh_security_group_members(fake_group)
+ mock_rsgm.assert_called_once_with(fake_group)
+
+    @mock.patch.object(firewall.NoopFirewallDriver,
+                       'refresh_security_group_rules', create=True)
+    def test_refresh_security_group_rules(self, mock_rsgr):
+        fake_group = 'fake-security-group-rules'
+        self.driver.refresh_security_group_rules(fake_group)
+        mock_rsgr.assert_called_once_with(fake_group)
+
+ @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(objects.Instance, 'save')
+ def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
+ mock_fg_bid, mock_set_pstate, mock_looping,
+ mock_wait_active, preserve=False):
+ node_uuid = uuidutils.generate_uuid()
+ instance_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid,
+ instance_type_id=5)
+ mock_get.return_value = node
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ flavor = {'id': flavor_id, 'name': 'baremetal'}
+ mock_fg_bid.return_value = flavor
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id)
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ self.driver.rebuild(
+ context=self.ctx, instance=instance, image_meta=image_meta,
+ injected_files=None, admin_password=None, bdms=None,
+ detach_block_devices=None, attach_block_devices=None,
+ preserve_ephemeral=preserve)
+
+ mock_save.assert_called_once_with(
+ expected_task_state=[task_states.REBUILDING])
+ mock_driver_fields.assert_called_once_with(node, instance, image_meta,
+ flavor, preserve)
+ mock_set_pstate.assert_called_once_with(node_uuid,
+ ironic_states.REBUILD)
+ mock_looping.assert_called_once_with(mock_wait_active,
+ FAKE_CLIENT_WRAPPER,
+ instance)
+ fake_looping_call.start.assert_called_once_with(
+ interval=CONF.ironic.api_retry_interval)
+ fake_looping_call.wait.assert_called_once_with()
+
+ def test_rebuild_preserve_ephemeral(self):
+ self._test_rebuild(preserve=True)
+
+ def test_rebuild_no_preserve_ephemeral(self):
+ self._test_rebuild(preserve=False)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(objects.Instance, 'save')
+ def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
+ mock_fg_bid, mock_set_pstate):
+ node_uuid = uuidutils.generate_uuid()
+ instance_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid,
+ instance_type_id=5)
+ mock_get.return_value = node
+
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor_id = 5
+ flavor = {'id': flavor_id, 'name': 'baremetal'}
+ mock_fg_bid.return_value = flavor
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid,
+ node=node_uuid,
+ instance_type_id=flavor_id)
+
+ exceptions = [
+ exception.NovaException(),
+ ironic_exception.BadRequest(),
+ ironic_exception.InternalServerError(),
+ ]
+ for e in exceptions:
+ mock_set_pstate.side_effect = e
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver.rebuild,
+ context=self.ctx, instance=instance, image_meta=image_meta,
+ injected_files=None, admin_password=None, bdms=None,
+ detach_block_devices=None, attach_block_devices=None)
diff --git a/nova/tests/unit/virt/ironic/test_patcher.py b/nova/tests/unit/virt/ironic/test_patcher.py
new file mode 100644
index 0000000000..a69e8cacfe
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_patcher.py
@@ -0,0 +1,139 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova import context as nova_context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import patcher
+
+CONF = cfg.CONF
+
+
+class IronicDriverFieldsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverFieldsTestCase, self).setUp()
+ self.image_meta = ironic_utils.get_test_image_meta()
+ self.flavor = ironic_utils.get_test_flavor()
+ self.ctx = nova_context.get_admin_context()
+ self.instance = fake_instance.fake_instance_obj(self.ctx)
+ # Generic expected patches
+ self._expected_deploy_patch = [{'path': '/instance_info/image_source',
+ 'value': self.image_meta['id'],
+ 'op': 'add'},
+ {'path': '/instance_info/root_gb',
+ 'value': str(self.instance['root_gb']),
+ 'op': 'add'},
+ {'path': '/instance_info/swap_mb',
+ 'value': str(self.flavor['swap']),
+ 'op': 'add'}]
+ self._expected_cleanup_patch = []
+
+ def test_create_generic(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patcher_obj = patcher.create(node)
+ self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)
+
+ def test_create_pxe(self):
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patcher_obj = patcher.create(node)
+ self.assertIsInstance(patcher_obj, patcher.PXEDriverFields)
+
+ def test_generic_get_deploy_patch(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor)
+ self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
+
+ def test_generic_get_deploy_patch_ephemeral(self):
+ CONF.set_override('default_ephemeral_format', 'testfmt')
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ ephemeral_gb=10)
+ patch = patcher.create(node).get_deploy_patch(
+ instance, self.image_meta, self.flavor)
+ expected = [{'path': '/instance_info/ephemeral_gb',
+ 'value': str(instance.ephemeral_gb),
+ 'op': 'add'},
+ {'path': '/instance_info/ephemeral_format',
+ 'value': 'testfmt',
+ 'op': 'add'}]
+ expected += self._expected_deploy_patch
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_generic_get_deploy_patch_preserve_ephemeral(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ for preserve in [True, False]:
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor,
+ preserve_ephemeral=preserve)
+ expected = [{'path': '/instance_info/preserve_ephemeral',
+ 'value': str(preserve), 'op': 'add', }]
+ expected += self._expected_deploy_patch
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_generic_get_cleanup_patch(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+ self.assertEqual(self._expected_cleanup_patch, patch)
+
+ def test_pxe_get_deploy_patch(self):
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ extra_specs = self.flavor['extra_specs']
+ expected = [{'path': '/driver_info/pxe_deploy_kernel',
+ 'value': extra_specs['baremetal:deploy_kernel_id'],
+ 'op': 'add'},
+ {'path': '/driver_info/pxe_deploy_ramdisk',
+ 'value': extra_specs['baremetal:deploy_ramdisk_id'],
+ 'op': 'add'}]
+ expected += self._expected_deploy_patch
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor)
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_pxe_get_deploy_patch_no_flavor_kernel_ramdisk_ids(self):
+ flavor = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, flavor)
+        # If there are no extra_specs, the patch should be exactly
+        # the same as a generic patch
+ self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
+
+ def test_pxe_get_cleanup_patch(self):
+ driver_info = {'pxe_deploy_kernel': 'fake-kernel-id',
+ 'pxe_deploy_ramdisk': 'fake-ramdisk-id'}
+ node = ironic_utils.get_test_node(driver='pxe_fake',
+ driver_info=driver_info)
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+ expected = [{'path': '/driver_info/pxe_deploy_kernel',
+ 'op': 'remove'},
+ {'path': '/driver_info/pxe_deploy_ramdisk',
+ 'op': 'remove'}]
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_pxe_get_cleanup_patch_no_flavor_kernel_ramdisk_ids(self):
+ self.flavor = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+        # If there are no extra_specs, the patch should be exactly
+        # the same as a generic patch
+ self.assertEqual(self._expected_cleanup_patch, patch)
diff --git a/nova/tests/unit/virt/ironic/utils.py b/nova/tests/unit/virt/ironic/utils.py
new file mode 100644
index 0000000000..cee0abffac
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/utils.py
@@ -0,0 +1,115 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.ironic import ironic_states
+
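+# The helpers below build lightweight fake Ironic node/port objects and
+# flavor/image metadata, plus a FakeClient mirroring the small subset of
+# the python-ironicclient API exercised by the Ironic driver tests.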
+
+def get_test_validation(**kw):
+ return type('interfaces', (object,),
+ {'power': kw.get('power', True),
+ 'deploy': kw.get('deploy', True),
+ 'console': kw.get('console', True),
+ 'rescue': kw.get('rescue', True)})()
+
+
+def get_test_node(**kw):
+ return type('node', (object,),
+ {'uuid': kw.get('uuid', 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'),
+ 'chassis_uuid': kw.get('chassis_uuid'),
+ 'power_state': kw.get('power_state',
+ ironic_states.NOSTATE),
+ 'target_power_state': kw.get('target_power_state',
+ ironic_states.NOSTATE),
+ 'provision_state': kw.get('provision_state',
+ ironic_states.NOSTATE),
+ 'target_provision_state': kw.get('target_provision_state',
+ ironic_states.NOSTATE),
+ 'last_error': kw.get('last_error'),
+ 'instance_uuid': kw.get('instance_uuid'),
+ 'driver': kw.get('driver', 'fake'),
+ 'driver_info': kw.get('driver_info', {}),
+ 'properties': kw.get('properties', {}),
+ 'reservation': kw.get('reservation'),
+ 'maintenance': kw.get('maintenance', False),
+ 'extra': kw.get('extra', {}),
+                 'updated_at': kw.get('updated_at'),
+                 'created_at': kw.get('created_at')})()
+
+
+def get_test_port(**kw):
+ return type('port', (object,),
+ {'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'),
+ 'node_uuid': kw.get('node_uuid', get_test_node().uuid),
+ 'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'),
+ 'extra': kw.get('extra', {}),
+ 'created_at': kw.get('created_at'),
+ 'updated_at': kw.get('updated_at')})()
+
+
+def get_test_flavor(**kw):
+ default_extra_specs = {'baremetal:deploy_kernel_id':
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ 'baremetal:deploy_ramdisk_id':
+ 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}
+ return {'name': kw.get('name', 'fake.flavor'),
+ 'extra_specs': kw.get('extra_specs', default_extra_specs),
+ 'swap': kw.get('swap', 0),
+ 'ephemeral_gb': kw.get('ephemeral_gb', 0)}
+
+
+def get_test_image_meta(**kw):
+ return {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')}
+
+
+class FakePortClient(object):
+
+ def get(self, port_uuid):
+ pass
+
+ def update(self, port_uuid, patch):
+ pass
+
+
+class FakeNodeClient(object):
+
+ def list(self, detail=False):
+ return []
+
+ def get(self, node_uuid):
+ pass
+
+ def get_by_instance_uuid(self, instance_uuid):
+ pass
+
+ def list_ports(self, node_uuid):
+ pass
+
+ def set_power_state(self, node_uuid, target):
+ pass
+
+ def set_provision_state(self, node_uuid, target):
+ pass
+
+ def update(self, node_uuid, patch):
+ pass
+
+ def validate(self, node_uuid):
+ pass
+
+
+class FakeClient(object):
+
+ node = FakeNodeClient()
+ port = FakePortClient()
diff --git a/nova/tests/unit/virt/libvirt/__init__.py b/nova/tests/unit/virt/libvirt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/fake_imagebackend.py b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
new file mode 100644
index 0000000000..9a7cbdbdaf
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
@@ -0,0 +1,75 @@
+# Copyright 2012 Grid Dynamics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova.virt.libvirt import config
+from nova.virt.libvirt import imagebackend
+
+
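+# Minimal stand-in for nova.virt.libvirt.imagebackend.Backend: image()
+# returns a FakeImage whose libvirt_info() always describes a raw,
+# file-backed disk, while snapshot() delegates to the real Backend so
+# the snapshot tests still exercise genuine imagebackend code.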
+class Backend(object):
+ def __init__(self, use_cow):
+ pass
+
+ def image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name):
+ self.path = os.path.join(instance['name'], name)
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None, *args, **kwargs):
+ pass
+
+ def snapshot(self, name):
+ pass
+
+ def libvirt_info(self, disk_bus, disk_dev, device_type,
+ cache_mode, extra_specs, hypervisor_version):
+ info = config.LibvirtConfigGuestDisk()
+ info.source_type = 'file'
+ info.source_device = device_type
+ info.target_bus = disk_bus
+ info.target_dev = disk_dev
+ info.driver_cache = cache_mode
+ info.driver_format = 'raw'
+ info.source_path = self.path
+ return info
+
+ return FakeImage(instance, name)
+
+ def snapshot(self, instance, disk_path, image_type=''):
+        # NOTE(bfilippov): this is done for the benefit of the
+        # snapshot tests in test_libvirt.LibvirtConnTestCase
+ return imagebackend.Backend(True).snapshot(instance,
+ disk_path,
+ image_type=image_type)
+
+
+class Raw(imagebackend.Image):
+ # NOTE(spandhe) Added for test_rescue and test_rescue_config_drive
+ def __init__(self, instance=None, disk_name=None, path=None):
+ pass
+
+ def _get_driver_format(self):
+ pass
+
+ def correct_format(self):
+ pass
+
+ def create_image(self, prepare_template, base, size, *args, **kwargs):
+ pass
diff --git a/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
new file mode 100644
index 0000000000..01ab689b00
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import StringIO
+
+from nova.virt.libvirt import utils as libvirt_utils
+
+
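+# In-memory replacements for nova.virt.libvirt.utils. Tests may seed the
+# module-level dicts below (files, disk_sizes, disk_backing_files) and
+# set disk_type to control what these fake helpers report.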
+files = {'console.log': True}
+disk_sizes = {}
+disk_backing_files = {}
+disk_type = "qcow2"
+
+
+def get_iscsi_initiator():
+ return "fake.initiator.iqn"
+
+
+def get_fc_hbas():
+ return [{'ClassDevice': 'host1',
+ 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
+ '/0000:05:00.2/host1/fc_host/host1',
+ 'dev_loss_tmo': '30',
+ 'fabric_name': '0x1000000533f55566',
+ 'issue_lip': '<store method only>',
+ 'max_npiv_vports': '255',
+ 'maxframe_size': '2048 bytes',
+ 'node_name': '0x200010604b019419',
+ 'npiv_vports_inuse': '0',
+ 'port_id': '0x680409',
+ 'port_name': '0x100010604b019419',
+ 'port_state': 'Online',
+ 'port_type': 'NPort (fabric via point-to-point)',
+ 'speed': '10 Gbit',
+ 'supported_classes': 'Class 3',
+ 'supported_speeds': '10 Gbit',
+ 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
+ 'tgtid_bind_type': 'wwpn (World Wide Port Name)',
+ 'uevent': None,
+ 'vport_create': '<store method only>',
+ 'vport_delete': '<store method only>'}]
+
+
+def get_fc_hbas_info():
+ hbas = get_fc_hbas()
+ info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
+ 'node_name': hbas[0]['node_name'].replace('0x', ''),
+ 'host_device': hbas[0]['ClassDevice'],
+ 'device_path': hbas[0]['ClassDevicePath']}]
+ return info
+
+
+def get_fc_wwpns():
+ hbas = get_fc_hbas()
+ wwpns = []
+ for hba in hbas:
+ wwpn = hba['port_name'].replace('0x', '')
+ wwpns.append(wwpn)
+
+ return wwpns
+
+
+def get_fc_wwnns():
+ hbas = get_fc_hbas()
+ wwnns = []
+ for hba in hbas:
+ wwnn = hba['node_name'].replace('0x', '')
+ wwnns.append(wwnn)
+
+ return wwnns
+
+
+def create_image(disk_format, path, size):
+ pass
+
+
+def create_cow_image(backing_file, path):
+ pass
+
+
+def get_disk_size(path):
+ return 0
+
+
+def get_disk_backing_file(path):
+ return disk_backing_files.get(path, None)
+
+
+def get_disk_type(path):
+ return disk_type
+
+
+def copy_image(src, dest):
+ pass
+
+
+def resize2fs(path):
+ pass
+
+
+def create_lvm_image(vg, lv, size, sparse=False):
+ pass
+
+
+def volume_group_free_space(vg):
+ pass
+
+
+def remove_logical_volumes(*paths):
+ pass
+
+
+def write_to_file(path, contents, umask=None):
+ pass
+
+
+def chown(path, owner):
+ pass
+
+
+def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
+ files[out_path] = ''
+
+
+class File(object):
+ def __init__(self, path, mode=None):
+ if path in files:
+ self.fp = StringIO.StringIO(files[path])
+ else:
+ self.fp = StringIO.StringIO(files[os.path.split(path)[-1]])
+
+ def __enter__(self):
+ return self.fp
+
+ def __exit__(self, *args):
+ return
+
+ def close(self, *args, **kwargs):
+ self.fp.close()
+
+
+def file_open(path, mode=None):
+ return File(path, mode)
+
+
+def find_disk(virt_dom):
+ if disk_type == 'lvm':
+ return "/dev/nova-vg/lv"
+ elif disk_type in ['raw', 'qcow2']:
+ return "filename"
+ else:
+ return "unknown_type_disk"
+
+
+def load_file(path):
+ if os.path.exists(path):
+ with open(path, 'r') as fp:
+ return fp.read()
+ else:
+ return ''
+
+
+def logical_volume_info(path):
+ return {}
+
+
+def file_delete(path):
+ return True
+
+
+def get_fs_info(path):
+ return {'total': 128 * (1024 ** 3),
+ 'used': 44 * (1024 ** 3),
+ 'free': 84 * (1024 ** 3)}
+
+
+def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
+ pass
+
+
+def get_instance_path(instance, forceold=False, relative=False):
+ return libvirt_utils.get_instance_path(instance, forceold=forceold,
+ relative=relative)
+
+
+def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
+ return "qemu"
+
+
+def is_valid_hostname(name):
+ return True
+
+
+def chown_for_id_maps(path, id_maps):
+ pass
+
+
+def get_arch(image_meta):
+ return libvirt_utils.get_arch(image_meta)
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
new file mode 100644
index 0000000000..3a0e7ebefb
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -0,0 +1,1108 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+import time
+import uuid
+
+from nova.compute import arch
+from nova.i18n import _
+
+# Allow passing None to the various connect methods
+# (i.e. allow the client to rely on default URLs)
+allow_default_uri_connection = True
+
+# string indicating the CPU arch
+node_arch = arch.X86_64 # or 'i686' (or whatever else uname -m might return)
+
+# memory size in kilobytes
+node_kB_mem = 4096
+
+# the number of active CPUs
+node_cpus = 2
+
+# expected CPU frequency
+node_mhz = 800
+
+# the number of NUMA cells, 1 for unusual NUMA topologies or uniform
+# memory access; check capabilities XML for the actual NUMA topology
+node_nodes = 1 # NUMA nodes
+
+# number of CPU sockets per node if nodes > 1, total number of CPU
+# sockets otherwise
+node_sockets = 1
+
+# number of cores per socket
+node_cores = 2
+
+# number of threads per core
+node_threads = 1
+
+# CPU model
+node_cpu_model = "Penryn"
+
+# CPU vendor
+node_cpu_vendor = "Intel"
+
+# Has libvirt connection been used at least once
+connection_used = False
+
+
+def _reset():
+ global allow_default_uri_connection
+ allow_default_uri_connection = True
+
+# virDomainState
+VIR_DOMAIN_NOSTATE = 0
+VIR_DOMAIN_RUNNING = 1
+VIR_DOMAIN_BLOCKED = 2
+VIR_DOMAIN_PAUSED = 3
+VIR_DOMAIN_SHUTDOWN = 4
+VIR_DOMAIN_SHUTOFF = 5
+VIR_DOMAIN_CRASHED = 6
+
+VIR_DOMAIN_XML_SECURE = 1
+VIR_DOMAIN_XML_INACTIVE = 2
+
+VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
+VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
+VIR_DOMAIN_BLOCK_REBASE_COPY = 8
+
+VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
+
+VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
+
+VIR_DOMAIN_EVENT_DEFINED = 0
+VIR_DOMAIN_EVENT_UNDEFINED = 1
+VIR_DOMAIN_EVENT_STARTED = 2
+VIR_DOMAIN_EVENT_SUSPENDED = 3
+VIR_DOMAIN_EVENT_RESUMED = 4
+VIR_DOMAIN_EVENT_STOPPED = 5
+VIR_DOMAIN_EVENT_SHUTDOWN = 6
+VIR_DOMAIN_EVENT_PMSUSPENDED = 7
+
+VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
+
+VIR_DOMAIN_AFFECT_CURRENT = 0
+VIR_DOMAIN_AFFECT_LIVE = 1
+VIR_DOMAIN_AFFECT_CONFIG = 2
+
+VIR_CPU_COMPARE_ERROR = -1
+VIR_CPU_COMPARE_INCOMPATIBLE = 0
+VIR_CPU_COMPARE_IDENTICAL = 1
+VIR_CPU_COMPARE_SUPERSET = 2
+
+VIR_CRED_USERNAME = 1
+VIR_CRED_AUTHNAME = 2
+VIR_CRED_LANGUAGE = 3
+VIR_CRED_CNONCE = 4
+VIR_CRED_PASSPHRASE = 5
+VIR_CRED_ECHOPROMPT = 6
+VIR_CRED_NOECHOPROMPT = 7
+VIR_CRED_REALM = 8
+VIR_CRED_EXTERNAL = 9
+
+VIR_MIGRATE_LIVE = 1
+VIR_MIGRATE_PEER2PEER = 2
+VIR_MIGRATE_TUNNELLED = 4
+VIR_MIGRATE_UNDEFINE_SOURCE = 16
+VIR_MIGRATE_NON_SHARED_INC = 128
+
+VIR_NODE_CPU_STATS_ALL_CPUS = -1
+
+VIR_DOMAIN_START_PAUSED = 1
+
+# libvirtError enums
+# (Intentionally different from what's in libvirt. We do this to check
+# that consumers of the library are using the symbolic names rather than
+# hardcoding the numerical values)
+VIR_FROM_QEMU = 100
+VIR_FROM_DOMAIN = 200
+VIR_FROM_NWFILTER = 330
+VIR_FROM_REMOTE = 340
+VIR_FROM_RPC = 345
+VIR_ERR_NO_SUPPORT = 3
+VIR_ERR_XML_DETAIL = 350
+VIR_ERR_NO_DOMAIN = 420
+VIR_ERR_OPERATION_INVALID = 55
+VIR_ERR_OPERATION_TIMEOUT = 68
+VIR_ERR_NO_NWFILTER = 620
+VIR_ERR_SYSTEM_ERROR = 900
+VIR_ERR_INTERNAL_ERROR = 950
+VIR_ERR_CONFIG_UNSUPPORTED = 951
+
+# Readonly
+VIR_CONNECT_RO = 1
+
+# virConnectBaselineCPU flags
+VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1
+
+# snapshotCreateXML flags
+VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
+VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
+VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
+VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+
+# blockCommit flags
+VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
+
+
+VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
+VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
+
+
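+# Flatten the interesting attributes of a <disk> element (type, device,
+# driver, source and target) into a plain dict; shared by the Domain XML
+# parser and the attach/detach fakes below.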
+def _parse_disk_info(element):
+ disk_info = {}
+ disk_info['type'] = element.get('type', 'file')
+ disk_info['device'] = element.get('device', 'disk')
+
+ driver = element.find('./driver')
+ if driver is not None:
+ disk_info['driver_name'] = driver.get('name')
+ disk_info['driver_type'] = driver.get('type')
+
+ source = element.find('./source')
+ if source is not None:
+ disk_info['source'] = source.get('file')
+ if not disk_info['source']:
+ disk_info['source'] = source.get('dev')
+
+ if not disk_info['source']:
+ disk_info['source'] = source.get('path')
+
+ target = element.find('./target')
+ if target is not None:
+ disk_info['target_dev'] = target.get('dev')
+ disk_info['target_bus'] = target.get('bus')
+
+ return disk_info
+
+
+class libvirtError(Exception):
+ """This class was copied and slightly modified from
+ `libvirt-python:libvirt-override.py`.
+
+ Since a test environment will use the real `libvirt-python` version of
+ `libvirtError` if it's installed and not this fake, we need to maintain
+ strict compatibility with the original class, including `__init__` args
+ and instance-attributes.
+
+ To create a libvirtError instance you should:
+
+ # Create an unsupported error exception
+ exc = libvirtError('my message')
+ exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)
+
+    self.err is a tuple of the form:
+ (error_code, error_domain, error_message, error_level, str1, str2,
+ str3, int1, int2)
+
+ Alternatively, you can use the `make_libvirtError` convenience function to
+ allow you to specify these attributes in one shot.
+ """
+ def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
+ vol=None):
+ Exception.__init__(self, defmsg)
+ self.err = None
+
+ def get_error_code(self):
+ if self.err is None:
+ return None
+ return self.err[0]
+
+ def get_error_domain(self):
+ if self.err is None:
+ return None
+ return self.err[1]
+
+ def get_error_message(self):
+ if self.err is None:
+ return None
+ return self.err[2]
+
+ def get_error_level(self):
+ if self.err is None:
+ return None
+ return self.err[3]
+
+ def get_str1(self):
+ if self.err is None:
+ return None
+ return self.err[4]
+
+ def get_str2(self):
+ if self.err is None:
+ return None
+ return self.err[5]
+
+ def get_str3(self):
+ if self.err is None:
+ return None
+ return self.err[6]
+
+ def get_int1(self):
+ if self.err is None:
+ return None
+ return self.err[7]
+
+ def get_int2(self):
+ if self.err is None:
+ return None
+ return self.err[8]
+
+
+class NWFilter(object):
+ def __init__(self, connection, xml):
+ self._connection = connection
+
+ self._xml = xml
+ self._parse_xml(xml)
+
+ def _parse_xml(self, xml):
+ tree = etree.fromstring(xml)
+ root = tree.find('.')
+ self._name = root.get('name')
+
+ def undefine(self):
+ self._connection._remove_filter(self)
+
+
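+# Fake of libvirt's virDomain. State transitions are driven directly by
+# the lifecycle methods (create/destroy/suspend/...), and XMLDesc()
+# re-renders a domain XML document from the definition parsed out of the
+# XML supplied at construction time.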
+class Domain(object):
+ def __init__(self, connection, xml, running=False, transient=False):
+ self._connection = connection
+ if running:
+ connection._mark_running(self)
+
+        self._state = VIR_DOMAIN_RUNNING if running else VIR_DOMAIN_SHUTOFF
+ self._transient = transient
+ self._def = self._parse_definition(xml)
+ self._has_saved_state = False
+ self._snapshots = {}
+ self._id = self._connection._id_counter
+
+ def _parse_definition(self, xml):
+ try:
+ tree = etree.fromstring(xml)
+ except etree.ParseError:
+ raise make_libvirtError(
+ libvirtError, "Invalid XML.",
+ error_code=VIR_ERR_XML_DETAIL,
+ error_domain=VIR_FROM_DOMAIN)
+
+ definition = {}
+
+ name = tree.find('./name')
+ if name is not None:
+ definition['name'] = name.text
+
+ uuid_elem = tree.find('./uuid')
+ if uuid_elem is not None:
+ definition['uuid'] = uuid_elem.text
+ else:
+ definition['uuid'] = str(uuid.uuid4())
+
+ vcpu = tree.find('./vcpu')
+ if vcpu is not None:
+ definition['vcpu'] = int(vcpu.text)
+
+ memory = tree.find('./memory')
+ if memory is not None:
+ definition['memory'] = int(memory.text)
+
+ os = {}
+ os_type = tree.find('./os/type')
+ if os_type is not None:
+ os['type'] = os_type.text
+ os['arch'] = os_type.get('arch', node_arch)
+
+ os_kernel = tree.find('./os/kernel')
+ if os_kernel is not None:
+ os['kernel'] = os_kernel.text
+
+ os_initrd = tree.find('./os/initrd')
+ if os_initrd is not None:
+ os['initrd'] = os_initrd.text
+
+ os_cmdline = tree.find('./os/cmdline')
+ if os_cmdline is not None:
+ os['cmdline'] = os_cmdline.text
+
+ os_boot = tree.find('./os/boot')
+ if os_boot is not None:
+ os['boot_dev'] = os_boot.get('dev')
+
+ definition['os'] = os
+
+ features = {}
+
+ acpi = tree.find('./features/acpi')
+ if acpi is not None:
+ features['acpi'] = True
+
+ definition['features'] = features
+
+ devices = {}
+
+ device_nodes = tree.find('./devices')
+ if device_nodes is not None:
+ disks_info = []
+ disks = device_nodes.findall('./disk')
+ for disk in disks:
+ disks_info += [_parse_disk_info(disk)]
+ devices['disks'] = disks_info
+
+ nics_info = []
+ nics = device_nodes.findall('./interface')
+ for nic in nics:
+ nic_info = {}
+ nic_info['type'] = nic.get('type')
+
+ mac = nic.find('./mac')
+ if mac is not None:
+ nic_info['mac'] = mac.get('address')
+
+ source = nic.find('./source')
+ if source is not None:
+ if nic_info['type'] == 'network':
+ nic_info['source'] = source.get('network')
+ elif nic_info['type'] == 'bridge':
+ nic_info['source'] = source.get('bridge')
+
+ nics_info += [nic_info]
+
+ devices['nics'] = nics_info
+
+ definition['devices'] = devices
+
+ return definition
+
+ def create(self):
+ self.createWithFlags(0)
+
+ def createWithFlags(self, flags):
+ # FIXME: Not handling flags at the moment
+ self._state = VIR_DOMAIN_RUNNING
+ self._connection._mark_running(self)
+ self._has_saved_state = False
+
+ def isActive(self):
+ return int(self._state == VIR_DOMAIN_RUNNING)
+
+ def undefine(self):
+ self._connection._undefine(self)
+
+ def undefineFlags(self, flags):
+ self.undefine()
+ if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
+ if self.hasManagedSaveImage(0):
+ self.managedSaveRemove()
+
+ def destroy(self):
+ self._state = VIR_DOMAIN_SHUTOFF
+ self._connection._mark_not_running(self)
+
+ def ID(self):
+ return self._id
+
+ def name(self):
+ return self._def['name']
+
+ def UUIDString(self):
+ return self._def['uuid']
+
+ def interfaceStats(self, device):
+ return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]
+
+ def blockStats(self, device):
+ return [2, 10000242400, 234, 2343424234, 34]
+
+ def suspend(self):
+ self._state = VIR_DOMAIN_PAUSED
+
+ def shutdown(self):
+ self._state = VIR_DOMAIN_SHUTDOWN
+ self._connection._mark_not_running(self)
+
+ def reset(self, flags):
+ # FIXME: Not handling flags at the moment
+ self._state = VIR_DOMAIN_RUNNING
+ self._connection._mark_running(self)
+
+ def info(self):
+ return [self._state,
+ long(self._def['memory']),
+ long(self._def['memory']),
+ self._def['vcpu'],
+ 123456789L]
+
+ def migrateToURI(self, desturi, flags, dname, bandwidth):
+ raise make_libvirtError(
+ libvirtError,
+ "Migration always fails for fake libvirt!",
+ error_code=VIR_ERR_INTERNAL_ERROR,
+ error_domain=VIR_FROM_QEMU)
+
+ def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
+ raise make_libvirtError(
+ libvirtError,
+ "Migration always fails for fake libvirt!",
+ error_code=VIR_ERR_INTERNAL_ERROR,
+ error_domain=VIR_FROM_QEMU)
+
+ def attachDevice(self, xml):
+ disk_info = _parse_disk_info(etree.fromstring(xml))
+ disk_info['_attached'] = True
+ self._def['devices']['disks'] += [disk_info]
+ return True
+
+ def attachDeviceFlags(self, xml, flags):
+ if (flags & VIR_DOMAIN_AFFECT_LIVE and
+ self._state != VIR_DOMAIN_RUNNING):
+ raise make_libvirtError(
+ libvirtError,
+ "AFFECT_LIVE only allowed for running domains!",
+ error_code=VIR_ERR_INTERNAL_ERROR,
+ error_domain=VIR_FROM_QEMU)
+ self.attachDevice(xml)
+
+ def detachDevice(self, xml):
+ disk_info = _parse_disk_info(etree.fromstring(xml))
+ disk_info['_attached'] = True
+ return disk_info in self._def['devices']['disks']
+
+ def detachDeviceFlags(self, xml, _flags):
+ self.detachDevice(xml)
+
+ def XMLDesc(self, flags):
+ disks = ''
+ for disk in self._def['devices']['disks']:
+ disks += '''<disk type='%(type)s' device='%(device)s'>
+ <driver name='%(driver_name)s' type='%(driver_type)s'/>
+ <source file='%(source)s'/>
+ <target dev='%(target_dev)s' bus='%(target_bus)s'/>
+ <address type='drive' controller='0' bus='0' unit='0'/>
+ </disk>''' % disk
+
+ nics = ''
+ for nic in self._def['devices']['nics']:
+ nics += '''<interface type='%(type)s'>
+ <mac address='%(mac)s'/>
+ <source %(type)s='%(source)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x0'/>
+ </interface>''' % nic
+
+ return '''<domain type='kvm'>
+ <name>%(name)s</name>
+ <uuid>%(uuid)s</uuid>
+ <memory>%(memory)s</memory>
+ <currentMemory>%(memory)s</currentMemory>
+ <vcpu>%(vcpu)s</vcpu>
+ <os>
+ <type arch='%(arch)s' machine='pc-0.12'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='localtime'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ %(disks)s
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
+ function='0x1'/>
+ </controller>
+ %(nics)s
+ <serial type='file'>
+ <source path='dummy.log'/>
+ <target port='0'/>
+ </serial>
+ <serial type='pty'>
+ <source pty='/dev/pts/27'/>
+ <target port='1'/>
+ </serial>
+ <serial type='tcp'>
+ <source host="-1" service="-1" mode="bind"/>
+ </serial>
+ <console type='file'>
+ <source path='dummy.log'/>
+ <target port='0'/>
+ </console>
+ <input type='tablet' bus='usb'/>
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <graphics type='spice' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02'
+ function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
+ function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>''' % {'name': self._def['name'],
+ 'uuid': self._def['uuid'],
+ 'memory': self._def['memory'],
+ 'vcpu': self._def['vcpu'],
+ 'arch': self._def['os']['arch'],
+ 'disks': disks,
+ 'nics': nics}
+
+ def managedSave(self, flags):
+ self._connection._mark_not_running(self)
+ self._has_saved_state = True
+
+ def managedSaveRemove(self, flags):
+ self._has_saved_state = False
+
+ def hasManagedSaveImage(self, flags):
+ return int(self._has_saved_state)
+
+ def resume(self):
+ self._state = VIR_DOMAIN_RUNNING
+
+ def snapshotCreateXML(self, xml, flags):
+ tree = etree.fromstring(xml)
+ name = tree.find('./name').text
+ snapshot = DomainSnapshot(name, self)
+ self._snapshots[name] = snapshot
+ return snapshot
+
+ def vcpus(self):
+ vcpus = ([], [])
+ for i in range(0, self._def['vcpu']):
+ vcpus[0].append((i, 1, 120405L, i))
+ vcpus[1].append((True, True, True, True))
+ return vcpus
+
+ def memoryStats(self):
+ return {}
+
+ def maxMemory(self):
+ return self._def['memory']
+
+ def blockJobInfo(self, disk, flags):
+ return {}
+
+
+class DomainSnapshot(object):
+ def __init__(self, name, domain):
+ self._name = name
+ self._domain = domain
+
+ def delete(self, flags):
+ del self._domain._snapshots[self._name]
+
+
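+# Fake of libvirt's virConnect. Defined and running fake Domains are
+# tracked in plain dicts, and lifecycle events are delivered
+# synchronously to any callback registered via domainEventRegisterAny().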
+class Connection(object):
+ def __init__(self, uri=None, readonly=False, version=9011):
+ if not uri or uri == '':
+ if allow_default_uri_connection:
+ uri = 'qemu:///session'
+ else:
+ raise ValueError("URI was None, but fake libvirt is "
+ "configured to not accept this.")
+
+ uri_whitelist = ['qemu:///system',
+ 'qemu:///session',
+ 'xen:///system',
+ 'uml:///system',
+ 'test:///default']
+
+ if uri not in uri_whitelist:
+ raise make_libvirtError(
+ libvirtError,
+ "libvirt error: no connection driver "
+ "available for No connection for URI %s" % uri,
+ error_code=5, error_domain=0)
+
+ self.readonly = readonly
+ self._uri = uri
+ self._vms = {}
+ self._running_vms = {}
+ self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
+ self._nwfilters = {}
+ self._event_callbacks = {}
+ self.fakeLibVersion = version
+ self.fakeVersion = version
+
+ def _add_filter(self, nwfilter):
+ self._nwfilters[nwfilter._name] = nwfilter
+
+ def _remove_filter(self, nwfilter):
+ del self._nwfilters[nwfilter._name]
+
+ def _mark_running(self, dom):
+ self._running_vms[self._id_counter] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
+ self._id_counter += 1
+
+ def _mark_not_running(self, dom):
+ if dom._transient:
+ self._undefine(dom)
+
+ dom._id = -1
+
+ for (k, v) in self._running_vms.iteritems():
+ if v == dom:
+ del self._running_vms[k]
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
+ return
+
+ def _undefine(self, dom):
+ del self._vms[dom.name()]
+ if not dom._transient:
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
+
+ def getInfo(self):
+ return [node_arch,
+ node_kB_mem,
+ node_cpus,
+ node_mhz,
+ node_nodes,
+ node_sockets,
+ node_cores,
+ node_threads]
+
+ def numOfDomains(self):
+ return len(self._running_vms)
+
+ def listDomainsID(self):
+ return self._running_vms.keys()
+
+ def lookupByID(self, id):
+ if id in self._running_vms:
+ return self._running_vms[id]
+ raise make_libvirtError(
+ libvirtError,
+ 'Domain not found: no domain with matching id %d' % id,
+ error_code=VIR_ERR_NO_DOMAIN,
+ error_domain=VIR_FROM_QEMU)
+
+ def lookupByName(self, name):
+ if name in self._vms:
+ return self._vms[name]
+ raise make_libvirtError(
+ libvirtError,
+ 'Domain not found: no domain with matching name "%s"' % name,
+ error_code=VIR_ERR_NO_DOMAIN,
+ error_domain=VIR_FROM_QEMU)
+
+ def listAllDomains(self, flags):
+ vms = []
+        for vm in self._vms.values():
+            if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
+                if vm._state != VIR_DOMAIN_SHUTOFF:
+                    vms.append(vm)
+            if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
+                if vm._state == VIR_DOMAIN_SHUTOFF:
+                    vms.append(vm)
+ return vms
+
+ def _emit_lifecycle(self, dom, event, detail):
+ if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
+ return
+
+ cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
+ callback = cbinfo[0]
+ opaque = cbinfo[1]
+ callback(self, dom, event, detail, opaque)
+
+ def defineXML(self, xml):
+ dom = Domain(connection=self, running=False, transient=False, xml=xml)
+ self._vms[dom.name()] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
+ return dom
+
+ def createXML(self, xml, flags):
+ dom = Domain(connection=self, running=True, transient=True, xml=xml)
+ self._vms[dom.name()] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
+ return dom
+
+ def getType(self):
+ if self._uri == 'qemu:///system':
+ return 'QEMU'
+
+ def getLibVersion(self):
+ return self.fakeLibVersion
+
+ def getVersion(self):
+ return self.fakeVersion
+
+ def getHostname(self):
+ return 'compute1'
+
+ def domainEventRegisterAny(self, dom, eventid, callback, opaque):
+ self._event_callbacks[eventid] = [callback, opaque]
+
+ def registerCloseCallback(self, cb, opaque):
+ pass
+
+ def getCapabilities(self):
+ """Return spoofed capabilities."""
+ return '''<capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ <feature name='tm2'/>
+ <feature name='est'/>
+ <feature name='vmx'/>
+ <feature name='ds_cpl'/>
+ <feature name='monitor'/>
+ <feature name='pbe'/>
+ <feature name='tm'/>
+ <feature name='ht'/>
+ <feature name='ss'/>
+ <feature name='acpi'/>
+ <feature name='ds'/>
+ <feature name='vme'/>
+ </cpu>
+ <migration_features>
+ <live/>
+ <uri_transports>
+ <uri_transport>tcp</uri_transport>
+ </uri_transports>
+ </migration_features>
+ <secmodel>
+ <model>apparmor</model>
+ <doi>0</doi>
+ </secmodel>
+ </host>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='i686'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu</emulator>
+ <machine>pc-0.14</machine>
+ <machine canonical='pc-0.14'>pc</machine>
+ <machine>pc-0.13</machine>
+ <machine>pc-0.12</machine>
+ <machine>pc-0.11</machine>
+ <machine>pc-0.10</machine>
+ <machine>isapc</machine>
+ <domain type='qemu'>
+ </domain>
+ <domain type='kvm'>
+ <emulator>/usr/bin/kvm</emulator>
+ <machine>pc-0.14</machine>
+ <machine canonical='pc-0.14'>pc</machine>
+ <machine>pc-0.13</machine>
+ <machine>pc-0.12</machine>
+ <machine>pc-0.11</machine>
+ <machine>pc-0.10</machine>
+ <machine>isapc</machine>
+ </domain>
+ </arch>
+ <features>
+ <cpuselection/>
+ <deviceboot/>
+ <pae/>
+ <nonpae/>
+ <acpi default='on' toggle='yes'/>
+ <apic default='on' toggle='no'/>
+ </features>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='x86_64'>
+ <wordsize>64</wordsize>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <machine>pc-0.14</machine>
+ <machine canonical='pc-0.14'>pc</machine>
+ <machine>pc-0.13</machine>
+ <machine>pc-0.12</machine>
+ <machine>pc-0.11</machine>
+ <machine>pc-0.10</machine>
+ <machine>isapc</machine>
+ <domain type='qemu'>
+ </domain>
+ <domain type='kvm'>
+ <emulator>/usr/bin/kvm</emulator>
+ <machine>pc-0.14</machine>
+ <machine canonical='pc-0.14'>pc</machine>
+ <machine>pc-0.13</machine>
+ <machine>pc-0.12</machine>
+ <machine>pc-0.11</machine>
+ <machine>pc-0.10</machine>
+ <machine>isapc</machine>
+ </domain>
+ </arch>
+ <features>
+ <cpuselection/>
+ <deviceboot/>
+ <acpi default='on' toggle='yes'/>
+ <apic default='on' toggle='no'/>
+ </features>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='armv7l'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu-system-arm</emulator>
+ <machine>integratorcp</machine>
+ <machine>vexpress-a9</machine>
+ <machine>syborg</machine>
+ <machine>musicpal</machine>
+ <machine>mainstone</machine>
+ <machine>n800</machine>
+ <machine>n810</machine>
+ <machine>n900</machine>
+ <machine>cheetah</machine>
+ <machine>sx1</machine>
+ <machine>sx1-v1</machine>
+ <machine>beagle</machine>
+ <machine>beaglexm</machine>
+ <machine>tosa</machine>
+ <machine>akita</machine>
+ <machine>spitz</machine>
+ <machine>borzoi</machine>
+ <machine>terrier</machine>
+ <machine>connex</machine>
+ <machine>verdex</machine>
+ <machine>lm3s811evb</machine>
+ <machine>lm3s6965evb</machine>
+ <machine>realview-eb</machine>
+ <machine>realview-eb-mpcore</machine>
+ <machine>realview-pb-a8</machine>
+ <machine>realview-pbx-a9</machine>
+ <machine>versatilepb</machine>
+ <machine>versatileab</machine>
+ <domain type='qemu'>
+ </domain>
+ </arch>
+ <features>
+ <deviceboot/>
+ </features>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='mips'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu-system-mips</emulator>
+ <machine>malta</machine>
+ <machine>mipssim</machine>
+ <machine>magnum</machine>
+ <machine>pica61</machine>
+ <machine>mips</machine>
+ <domain type='qemu'>
+ </domain>
+ </arch>
+ <features>
+ <deviceboot/>
+ </features>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='mipsel'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu-system-mipsel</emulator>
+ <machine>malta</machine>
+ <machine>mipssim</machine>
+ <machine>magnum</machine>
+ <machine>pica61</machine>
+ <machine>mips</machine>
+ <domain type='qemu'>
+ </domain>
+ </arch>
+ <features>
+ <deviceboot/>
+ </features>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='sparc'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu-system-sparc</emulator>
+ <machine>SS-5</machine>
+ <machine>leon3_generic</machine>
+ <machine>SS-10</machine>
+ <machine>SS-600MP</machine>
+ <machine>SS-20</machine>
+ <machine>Voyager</machine>
+ <machine>LX</machine>
+ <machine>SS-4</machine>
+ <machine>SPARCClassic</machine>
+ <machine>SPARCbook</machine>
+ <machine>SS-1000</machine>
+ <machine>SS-2000</machine>
+ <machine>SS-2</machine>
+ <domain type='qemu'>
+ </domain>
+ </arch>
+ </guest>
+
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='ppc'>
+ <wordsize>32</wordsize>
+ <emulator>/usr/bin/qemu-system-ppc</emulator>
+ <machine>g3beige</machine>
+ <machine>virtex-ml507</machine>
+ <machine>mpc8544ds</machine>
+ <machine canonical='bamboo-0.13'>bamboo</machine>
+ <machine>bamboo-0.13</machine>
+ <machine>bamboo-0.12</machine>
+ <machine>ref405ep</machine>
+ <machine>taihu</machine>
+ <machine>mac99</machine>
+ <machine>prep</machine>
+ <domain type='qemu'>
+ </domain>
+ </arch>
+ <features>
+ <deviceboot/>
+ </features>
+ </guest>
+
+</capabilities>'''
+
+ def compareCPU(self, xml, flags):
+ tree = etree.fromstring(xml)
+
+ arch_node = tree.find('./arch')
+ if arch_node is not None:
+ if arch_node.text not in [arch.X86_64,
+ arch.I686]:
+ return VIR_CPU_COMPARE_INCOMPATIBLE
+
+ model_node = tree.find('./model')
+ if model_node is not None:
+ if model_node.text != node_cpu_model:
+ return VIR_CPU_COMPARE_INCOMPATIBLE
+
+ vendor_node = tree.find('./vendor')
+ if vendor_node is not None:
+ if vendor_node.text != node_cpu_vendor:
+ return VIR_CPU_COMPARE_INCOMPATIBLE
+
+ # The rest of the stuff libvirt implements is rather complicated
+ # and I don't think it adds much value to replicate it here.
+
+ return VIR_CPU_COMPARE_IDENTICAL
+
+ def getCPUStats(self, cpuNum, flag):
+ if cpuNum < 2:
+ return {'kernel': 5664160000000L,
+ 'idle': 1592705190000000L,
+ 'user': 26728850000000L,
+ 'iowait': 6121490000000L}
+ else:
+ raise make_libvirtError(
+ libvirtError,
+ "invalid argument: Invalid cpu number",
+ error_code=VIR_ERR_INTERNAL_ERROR,
+ error_domain=VIR_FROM_QEMU)
+
+ def nwfilterLookupByName(self, name):
+ try:
+ return self._nwfilters[name]
+ except KeyError:
+ raise make_libvirtError(
+ libvirtError,
+ "no nwfilter with matching name %s" % name,
+ error_code=VIR_ERR_NO_NWFILTER,
+ error_domain=VIR_FROM_NWFILTER)
+
+ def nwfilterDefineXML(self, xml):
+ nwfilter = NWFilter(self, xml)
+ self._add_filter(nwfilter)
+
+ def listDefinedDomains(self):
+ return []
+
+ def listDevices(self, cap, flags):
+ return []
+
+ def baselineCPU(self, cpu, flag):
+ """Add new libvirt API."""
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ </cpu>"""
+
+
+def openAuth(uri, auth, flags):
+
+ if type(auth) != list:
+ raise Exception(_("Expected a list for 'auth' parameter"))
+
+ if type(auth[0]) != list:
+ raise Exception(
+ _("Expected a function in 'auth[0]' parameter"))
+
+ if not callable(auth[1]):
+ raise Exception(
+ _("Expected a function in 'auth[1]' parameter"))
+
+ return Connection(uri, (flags == VIR_CONNECT_RO))
+
+
+def virEventRunDefaultImpl():
+ time.sleep(1)
+
+
+def virEventRegisterDefaultImpl():
+ if connection_used:
+ raise Exception(_("virEventRegisterDefaultImpl() must be \
+ called before connection is used."))
+
+
+def registerErrorHandler(handler, ctxt):
+ pass
+
+
+def make_libvirtError(error_class, msg, error_code=None,
+ error_domain=None, error_message=None,
+ error_level=None, str1=None, str2=None, str3=None,
+ int1=None, int2=None):
+ """Convenience function for creating `libvirtError` exceptions which
+ allow you to specify arguments in constructor without having to manipulate
+ the `err` tuple directly.
+
+ We need to pass in `error_class` to this function because it may be
+ `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
+ `libvirt-python` is installed.
+ """
+ exc = error_class(msg)
+ exc.err = (error_code, error_domain, error_message, error_level,
+ str1, str2, str3, int1, int2)
+ return exc
+
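+# Illustrative sketch only: building an error equivalent to the "no such
+# domain" failure raised by the fakes above would look like
+#
+#     exc = make_libvirtError(libvirtError, 'Domain not found',
+#                             error_code=VIR_ERR_NO_DOMAIN,
+#                             error_domain=VIR_FROM_QEMU)
+#     assert exc.get_error_code() == VIR_ERR_NO_DOMAIN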
+
+virDomain = Domain
+
+
+virConnect = Connection
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
new file mode 100644
index 0000000000..f849bc59a7
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -0,0 +1,991 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova import block_device
+from nova.compute import arch
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+import nova.tests.unit.image.fake
+from nova.virt import block_device as driver_block_device
+from nova.virt.libvirt import blockinfo
+
+
+class LibvirtBlockInfoTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtBlockInfoTest, self).setUp()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.test_instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2, # m1.tiny
+ 'config_drive': None,
+ 'system_metadata': {
+ 'instance_type_memory_mb': 128,
+ 'instance_type_root_gb': 0,
+ 'instance_type_name': 'm1.micro',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_vcpus': 1,
+ 'instance_type_swap': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': '1',
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_id': 2,
+ }
+ }
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/sdc1', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/sdd', 'size': 10}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ self.assertEqual(
+ true_or_false,
+ block_device.volume_in_mapping(device_name,
+ block_device_info))
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_find_disk_dev(self):
+ mapping = {
+ "disk.local": {
+ 'dev': 'sda',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ "disk.swap": {
+ 'dev': 'sdc',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ }
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
+ self.assertEqual('sdb', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
+ last_device=True)
+ self.assertEqual('sdz', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
+ self.assertEqual('vda', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
+ self.assertEqual('fda', dev)
+
+ def test_get_next_disk_dev(self):
+ mapping = {}
+ mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.local'])
+
+ mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.swap'])
+
+ mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
+ 'ide',
+ 'cdrom',
+ True)
+ self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
+ mapping['disk.config'])
+
+ def test_get_next_disk_dev_boot_index(self):
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)
+
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio',
+ 'type': 'disk', 'boot_index': '2'},
+ info)
+
+ def test_get_disk_mapping_simple(self):
+ # The simplest possible disk mapping setup, all defaults
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_rootdev(self):
+ # A simple disk mapping setup, but with custom root device name
+
+ instance_ref = objects.Instance(**self.test_instance)
+ block_device_info = {
+ 'root_device_name': '/dev/sda'
+ }
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_rescue(self):
+ # A simple disk mapping setup, but in rescue mode
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ rescue=True)
+
+ expect = {
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_lxc(self):
+ # A simple disk mapping setup, but for lxc
+
+ self.test_instance['ephemeral_gb'] = 0
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
+ "lxc", "lxc",
+ None)
+ expect = {
+ 'disk': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_iso(self):
+        # A simple disk mapping setup, but with an ISO as the root device
+
+ instance_ref = objects.Instance(**self.test_instance)
+ image_meta = {'disk_format': 'iso'}
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ None,
+ image_meta)
+
+ expect = {
+ 'disk': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_swap(self):
+ # A simple disk mapping setup, but with a swap device added
+
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_configdrive(self):
+        # A simple disk mapping setup, but with a config drive added.
+        # It's necessary to check whether the architecture is power,
+        # because power has no ide support, so libvirt translates all
+        # ide requests to scsi there.
+
+ self.flags(force_config_drive=True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+        # The last device on the bus is selected for the config drive.
+        # On x86 that is the last ide device (hdd); since power only
+        # supports scsi, the last device there is sdz.
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_cdrom_configdrive(self):
+        # A simple disk mapping setup, with the config drive added as a
+        # cdrom. It's necessary to check whether the architecture is
+        # power, because power has no ide support, so libvirt translates
+        # all ide requests to scsi there.
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='iso9660')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_disk_configdrive(self):
+        # A simple disk mapping setup, with the config drive added as a disk
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='vfat')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_ephemeral(self):
+ # A disk mapping with ephemeral devices
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ {'device_type': 'floppy',
+ 'device_name': '/dev/vdd', 'size': 10},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_custom_swap(self):
+ # A disk mapping with a swap device at position vdb. This
+ # should cause disk.local to be removed
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'swap': {'device_name': '/dev/vdb',
+ 'swap_size': 10},
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_root(self):
+ # A disk mapping with a blockdev replacing the default root
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'device_type': 'disk',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_eph(self):
+ # A disk mapping with a blockdev replacing the ephemeral device
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_many(self):
+        # A disk mapping with blockdevs replacing all devices
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'disk_bus': 'scsi',
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdc",
+ 'boot_index': -1,
+ 'device_type': 'cdrom',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_complex(self):
+ # The strangest possible disk mapping setup
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vdf',
+ 'swap': {'device_name': '/dev/vdy',
+ 'swap_size': 10},
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ ],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_updates_original(self):
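+        # get_disk_mapping is expected to sanitise the supplied
+        # block_device_info in place: bogus bus/type values are replaced
+        # with defaults and the unnamed mapping gets a device name.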
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': {'device_name': '/dev/vdb',
+ 'device_type': 'really_lame_type',
+ 'swap_size': 10},
+ 'ephemerals': [{'disk_bus': 'no_such_bus',
+ 'device_type': 'yeah_right',
+ 'device_name': '/dev/vdc', 'size': 10}],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': None,
+ 'device_type': 'lawnmower',
+ 'delete_on_termination': True}]
+ }
+ expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
+ 'device_type': 'disk', 'swap_size': 10}
+ expected_ephemeral = {'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'device_name': '/dev/vdc', 'size': 10}
+ expected_bdm = {'connection_info': "fake",
+ 'mount_device': '/dev/vdd',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True}
+
+ blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide", block_device_info)
+
+ self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_ephemeral,
+ block_device_info['ephemerals'][0])
+ self.assertEqual(expected_bdm,
+ block_device_info['block_device_mapping'][0])
+
+ def test_get_disk_bus(self):
+ expected = (
+ (arch.X86_64, 'disk', 'virtio'),
+ (arch.X86_64, 'cdrom', 'ide'),
+ (arch.X86_64, 'floppy', 'fdc'),
+ (arch.PPC, 'disk', 'virtio'),
+ (arch.PPC, 'cdrom', 'scsi'),
+ (arch.PPC64, 'disk', 'virtio'),
+ (arch.PPC64, 'cdrom', 'scsi')
+ )
+ for guestarch, dev, res in expected:
+ with mock.patch.object(blockinfo.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ expected = (
+ ('scsi', None, 'disk', 'scsi'),
+ (None, 'scsi', 'cdrom', 'scsi'),
+ ('usb', None, 'disk', 'usb')
+ )
+ for dbus, cbus, dev, res in expected:
+ image_meta = {'properties': {'hw_disk_bus': dbus,
+ 'hw_cdrom_bus': cbus}}
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta,
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ image_meta = {'properties': {'hw_disk_bus': 'xen'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ blockinfo.get_disk_bus_for_device_type,
+ 'kvm',
+ image_meta)
+
+ def test_success_get_disk_bus_for_disk_dev(self):
+ expected = (
+ ('ide', ("kvm", "hda")),
+ ('scsi', ("kvm", "sdf")),
+ ('virtio', ("kvm", "vds")),
+ ('fdc', ("kvm", "fdc")),
+ ('uml', ("kvm", "ubd")),
+ ('xen', ("xen", "sdf")),
+ ('xen', ("xen", "xvdb"))
+ )
+ for res, args in expected:
+ self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))
+
+ def test_fail_get_disk_bus_for_disk_dev(self):
+ self.assertRaises(exception.NovaException,
+ blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
+
+ def test_get_config_drive_type_default(self):
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_cdrom(self):
+ self.flags(config_drive_format='iso9660')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_disk(self):
+ self.flags(config_drive_format='vfat')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('disk', config_drive_type)
+
+ def test_get_config_drive_type_improper_value(self):
+ self.flags(config_drive_format='test')
+ self.assertRaises(exception.ConfigDriveUnknownFormat,
+ blockinfo.get_config_drive_type)
+
+ def test_get_info_from_bdm(self):
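+        # The expected values below show that invalid bus/type entries
+        # do not leak through and that boot_index ends up as a one-based
+        # string ('0' becomes '1').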
+ bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
+ 'disk_bus': 'usb', 'swap_size': 4},
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 2},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 3},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/sdr",
+ 'disk_bus': 'lame_bus',
+ 'device_type': 'cdrom',
+ 'boot_index': 0,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdo",
+ 'disk_bus': 'scsi',
+ 'boot_index': 1,
+ 'device_type': 'lame_type',
+ 'delete_on_termination': True}]
+ expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
+ {'dev': 'vdb', 'type': 'disk',
+ 'bus': 'virtio', 'format': 'ext3'},
+ {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
+ {'dev': 'sdr', 'type': 'cdrom',
+ 'bus': 'scsi', 'boot_index': '1'},
+ {'dev': 'vdo', 'type': 'disk',
+ 'bus': 'scsi', 'boot_index': '2'}]
+
+        for bdm, expect in zip(bdms, expected):
+            self.assertEqual(expect,
+ blockinfo.get_info_from_bdm('kvm', bdm, {}))
+
+        # Test that an explicitly passed bus and type are honoured
+ bdm = {'device_name': '/dev/vda'}
+ expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
+ self.assertEqual(
+ expected, blockinfo.get_info_from_bdm('kvm', bdm, {},
+ disk_bus='ide',
+ dev_type='disk'))
+
+ # Test that lame bus values are defaulted properly
+ bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
+ with mock.patch.object(blockinfo,
+ 'get_disk_bus_for_device_type',
+ return_value='ide') as get_bus:
+ blockinfo.get_info_from_bdm('kvm', bdm, {})
+ get_bus.assert_called_once_with('kvm', None, 'cdrom')
+
+        # Test that a missing device name is defaulted as expected
+ bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
+ expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
+ mapping = {'root': {'dev': 'vda'}}
+ with mock.patch.object(blockinfo,
+ 'find_disk_dev_for_disk_bus',
+ return_value='vdd') as find_dev:
+ got = blockinfo.get_info_from_bdm(
+ 'kvm', bdm, mapping, assigned_devices=['vdb', 'vdc'])
+ find_dev.assert_called_once_with(
+ {'root': {'dev': 'vda'},
+ 'vdb': {'dev': 'vdb'},
+ 'vdc': {'dev': 'vdc'}}, 'ide')
+ self.assertEqual(expected, got)
+
+ def test_get_device_name(self):
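+        # Both the BlockDeviceMapping object and its driver wrapper
+        # should report the device name, or None when it is unset.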
+ bdm_obj = objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}))
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))
+
+ bdm_obj.device_name = None
+ self.assertIsNone(blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertIsNone(blockinfo.get_device_name(driver_bdm))
+
+ @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
+ return_value='vda')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
+ return_value='virtio')
+ def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide')
+ mock_find_dev.assert_called_once_with({}, 'virtio')
+
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide',
+ root_device_name='/dev/vda')
+ mock_get_bus.assert_called_once_with('kvm', '/dev/vda')
+
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_get_root_info_bdm(self, mock_get_info):
+ root_bdm = {'mount_device': '/dev/vda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'}
+ # No root_device_name
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Both device names
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Missing device names
+ del root_bdm['mount_device']
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm',
+ {'device_name': 'sda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'},
+ {}, 'virtio')
+
+ def test_get_boot_order_simple(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_complex(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/hda': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '3'},
+ '/dev/fda': {'bus': 'fdc', 'dev': 'fda',
+ 'type': 'floppy', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'fd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_overlapping(self):
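+        # Both disks map to the same 'hd' boot target, so it should
+        # only appear once in the resulting boot order.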
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'boot_index': '2'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
+ 'type': 'cdrom', 'boot_index': '3'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+
+class DefaultDeviceNamesTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DefaultDeviceNamesTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2}
+ self.root_device_name = '/dev/vda'
+ self.virt_type = 'kvm'
+ self.flavor = {'swap': 4}
+ self.patchers = []
+ self.patchers.append(mock.patch('nova.compute.flavors.extract_flavor',
+ return_value=self.flavor))
+ self.patchers.append(mock.patch(
+ 'nova.objects.block_device.BlockDeviceMapping.save'))
+ for patcher in self.patchers:
+ patcher.start()
+
+ self.ephemerals = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.swap = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.block_device_mapping = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'snapshot',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))]
+
+ def tearDown(self):
+ super(DefaultDeviceNamesTestCase, self).tearDown()
+ for patcher in self.patchers:
+ patcher.stop()
+
+ def _test_default_device_names(self, *block_device_lists):
+ blockinfo.default_device_names(self.virt_type,
+ self.context,
+ self.instance,
+ self.root_device_name,
+ *block_device_lists)
+
+ def test_only_block_device_mapping(self):
+ # Test no-op
+ original_bdm = copy.deepcopy(self.block_device_mapping)
+ self._test_default_device_names([], [], self.block_device_mapping)
+ for original, defaulted in zip(
+ original_bdm, self.block_device_mapping):
+ self.assertEqual(original.device_name, defaulted.device_name)
+
+        # Assert it defaults the missing ones as expected
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], [], self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_ephemerals(self):
+ # Test ephemeral gets assigned
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_swap(self):
+ # Test swap only
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names([], self.swap, [])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and block_device_mapping
+ self.swap[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], self.swap,
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_all_together(self):
+ # Test swap missing
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and eph missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test all missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
new file mode 100644
index 0000000000..192d075640
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -0,0 +1,2344 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.utils import units
+
+from nova.compute import arch
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt.libvirt import config
+
+
+class LibvirtConfigBaseTest(test.NoDBTestCase):
+ def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
+ self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
+
+
+class LibvirtConfigTest(LibvirtConfigBaseTest):
+
+ def test_config_plain(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, "<demo/>")
+
+ def test_config_ns(self):
+ obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
+ ns_uri="http://example.com/foo")
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, """
+ <foo:demo xmlns:foo="http://example.com/foo"/>""")
+
+ def test_config_text(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ root = obj.format_dom()
+ root.append(obj._text_node("foo", "bar"))
+
+ xml = etree.tostring(root)
+ self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")
+
+ def test_config_text_unicode(self):
+ obj = config.LibvirtConfigObject(root_name='demo')
+ root = obj.format_dom()
+ root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9'))
+ self.assertXmlEqual('<demo><foo>&#240;&#159;&#146;&#169;</foo></demo>',
+ etree.tostring(root))
+
+ def test_config_parse(self):
+ inxml = "<demo><foo/></demo>"
+ obj = config.LibvirtConfigObject(root_name="demo")
+ obj.parse_str(inxml)
+
+
+class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
+
+ def test_config_host(self):
+ xmlin = """
+ <capabilities>
+ <host>
+ <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Opteron_G3</model>
+ <vendor>AMD</vendor>
+ <topology sockets='1' cores='4' threads='1'/>
+ <feature name='ibs'/>
+ <feature name='osvw'/>
+ </cpu>
+ <topology>
+ <cells num='2'>
+ <cell id='0'>
+ <memory unit='KiB'>4048280</memory>
+ <pages unit='KiB' size='4'>1011941</pages>
+ <pages unit='KiB' size='2048'>0</pages>
+ <cpus num='4'>
+ <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' core_id='3' siblings='3'/>
+ </cpus>
+ </cell>
+ <cell id='1'>
+ <memory unit='KiB'>4127684</memory>
+ <pages unit='KiB' size='4'>1031921</pages>
+ <pages unit='KiB' size='2048'>0</pages>
+ <cpus num='4'>
+ <cpu id='4' socket_id='1' core_id='0' siblings='4'/>
+ <cpu id='5' socket_id='1' core_id='1' siblings='5'/>
+ <cpu id='6' socket_id='1' core_id='2' siblings='6'/>
+ <cpu id='7' socket_id='1' core_id='3' siblings='7'/>
+ </cpus>
+ </cell>
+ </cells>
+ </topology>
+ </host>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='x86_64'/>
+ </guest>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='i686'/>
+ </guest>
+ </capabilities>"""
+
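+        # Parsing the capabilities document and serialising it again
+        # should round-trip to the same XML.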
+ obj = config.LibvirtConfigCaps()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost)
+ self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
+
+ xmlout = obj.to_xml()
+
+ self.assertXmlEqual(xmlin, xmlout)
+
+
+class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
+ def test_config_platform(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.track = "host"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="platform" track="host"/>
+ """)
+
+ def test_config_pit(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.name = "pit"
+ obj.tickpolicy = "discard"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="pit" tickpolicy="discard"/>
+ """)
+
+ def test_config_hpet(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.name = "hpet"
+ obj.present = False
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="hpet" present="no"/>
+ """)
+
+
+class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
+ def test_config_utc(self):
+ obj = config.LibvirtConfigGuestClock()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="utc"/>
+ """)
+
+ def test_config_localtime(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "localtime"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="localtime"/>
+ """)
+
+ def test_config_timezone(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "timezone"
+ obj.timezone = "EDT"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="timezone" timezone="EDT"/>
+ """)
+
+ def test_config_variable(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "variable"
+ obj.adjustment = "123456"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="variable" adjustment="123456"/>
+ """)
+
+ def test_config_timers(self):
+ obj = config.LibvirtConfigGuestClock()
+
+ tmpit = config.LibvirtConfigGuestTimer()
+ tmpit.name = "pit"
+ tmpit.tickpolicy = "discard"
+
+ tmrtc = config.LibvirtConfigGuestTimer()
+ tmrtc.name = "rtc"
+ tmrtc.tickpolicy = "merge"
+
+ obj.add_timer(tmpit)
+ obj.add_timer(tmrtc)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="utc">
+ <timer name="pit" tickpolicy="discard"/>
+ <timer name="rtc" tickpolicy="merge"/>
+ </clock>
+ """)
+
+
+class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPUFeature("mtrr")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr"/>
+ """)
+
+
+class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUFeature("mtrr")
+ obj.policy = "force"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr" policy="force"/>
+ """)
+
+
+class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
+
+ def test_parse_dom(self):
+ xml = """
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ """
+ xmldoc = etree.fromstring(xml)
+ obj = config.LibvirtConfigGuestCPUNUMA()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(2, len(obj.cells))
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUNUMA()
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ obj.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ obj.cells.append(cell)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ """)
+
+
+class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic"/>
+ <feature name="mtrr"/>
+ </cpu>
+ """)
+
+    def test_only_uniq_cpu_features(self):
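+        # Adding the same feature twice must not produce duplicate
+        # <feature> elements in the generated XML.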
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic"/>
+ <feature name="mtrr"/>
+ </cpu>
+ """)
+
+ def test_config_topology(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.sockets = 4
+ obj.cores = 4
+ obj.threads = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ <topology sockets="4" cores="4" threads="2"/>
+ </cpu>
+ """)
+
+
+class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu match="exact">
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+ obj.mode = "custom"
+
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="custom" match="exact">
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic" policy="require"/>
+ <feature name="mtrr" policy="require"/>
+ </cpu>
+ """)
+
+ def test_config_host(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="host-model" match="exact"/>
+ """)
+
+ def test_config_host_with_numa(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
+ numa = config.LibvirtConfigGuestCPUNUMA()
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ numa.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ numa.cells.append(cell)
+
+ obj.numa = numa
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="host-model" match="exact">
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ </cpu>
+ """)
+
+
+class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSMBIOS()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <smbios mode="sysinfo"/>
+ """)
+
+
+class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios"/>
+ """)
+
+ def test_config_bios(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.bios_version = "6.6.6"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ <entry name="version">6.6.6</entry>
+ </bios>
+ </sysinfo>
+ """)
+
+ def test_config_system(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_version = "6.6.6"
+ obj.system_serial = "123456"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="version">6.6.6</entry>
+ <entry name="serial">123456</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+ def test_config_mixed(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+
+class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
+
+ def test_config_file(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_file_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_file_serial(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>""")
+
+ def test_config_file_serial_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9')
+
+ def test_config_file_discard(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.driver_name = "qemu"
+ obj.driver_format = "qcow2"
+ obj.driver_cache = "none"
+ obj.driver_discard = "unmap"
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello.qcow2"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>""", xml)
+
+ def test_config_file_discard_parse(self):
+ xml = """
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual('unmap', obj.driver_discard)
+
+ def test_config_block(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "block"
+ obj.source_path = "/tmp/hello"
+ obj.source_device = "cdrom"
+ obj.driver_name = "qemu"
+ obj.target_dev = "/dev/hdc"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="block" device="cdrom">
+ <driver name="qemu"/>
+ <source dev="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hdc"/>
+ </disk>""")
+
+ def test_config_block_parse(self):
+ xml = """<disk type="block" device="cdrom">
+ <driver name="qemu"/>
+ <source dev="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hdc"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'block')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hdc')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_network(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "network"
+ obj.source_protocol = "iscsi"
+ obj.source_name = "foo.bar.com"
+ obj.driver_name = "qemu"
+ obj.driver_format = "qcow2"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="qcow2"/>
+ <source name="foo.bar.com" protocol="iscsi"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_network_parse(self):
+ xml = """<disk type="network" device="disk">
+ <driver name="qemu" type="qcow2"/>
+ <source name="foo.bar.com" protocol="iscsi"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'network')
+ self.assertEqual(obj.source_protocol, 'iscsi')
+ self.assertEqual(obj.source_name, 'foo.bar.com')
+ self.assertEqual(obj.driver_name, 'qemu')
+ self.assertEqual(obj.driver_format, 'qcow2')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_network_no_name(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = 'network'
+ obj.source_protocol = 'nbd'
+ obj.source_hosts = ['foo.bar.com']
+ obj.source_ports = [None]
+ obj.driver_name = 'qemu'
+ obj.driver_format = 'raw'
+ obj.target_dev = '/dev/vda'
+ obj.target_bus = 'virtio'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source protocol="nbd">
+ <host name="foo.bar.com"/>
+ </source>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_network_multihost(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = 'network'
+ obj.source_protocol = 'rbd'
+ obj.source_name = 'pool/image'
+ obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
+ obj.source_ports = [None, '123', '456']
+ obj.driver_name = 'qemu'
+ obj.driver_format = 'raw'
+ obj.target_dev = '/dev/vda'
+ obj.target_bus = 'virtio'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source name="pool/image" protocol="rbd">
+ <host name="foo.bar.com"/>
+ <host name="::1" port="123"/>
+ <host name="1.2.3.4" port="456"/>
+ </source>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_network_auth(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "network"
+ obj.source_protocol = "rbd"
+ obj.source_name = "pool/image"
+ obj.driver_name = "qemu"
+ obj.driver_format = "raw"
+ obj.target_dev = "/dev/vda"
+ obj.target_bus = "virtio"
+ obj.auth_username = "foo"
+ obj.auth_secret_type = "ceph"
+ obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source name="pool/image" protocol="rbd"/>
+ <auth username="foo">
+ <secret type="ceph"
+ uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
+ </auth>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_iotune(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.disk_read_bytes_sec = 1024000
+ obj.disk_read_iops_sec = 1000
+ obj.disk_total_bytes_sec = 2048000
+ obj.disk_write_bytes_sec = 1024000
+ obj.disk_write_iops_sec = 1000
+ obj.disk_total_iops_sec = 2000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <iotune>
+ <read_bytes_sec>1024000</read_bytes_sec>
+ <read_iops_sec>1000</read_iops_sec>
+ <write_bytes_sec>1024000</write_bytes_sec>
+ <write_iops_sec>1000</write_iops_sec>
+ <total_bytes_sec>2048000</total_bytes_sec>
+ <total_iops_sec>2000</total_iops_sec>
+ </iotune>
+ </disk>""")
+
+ def test_config_blockio(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.logical_block_size = "4096"
+ obj.physical_block_size = "4096"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <blockio logical_block_size="4096" physical_block_size="4096"/>
+ </disk>""", xml)
+
+
+class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):
+
+ def test_config_file(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_file_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+
+class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
+
+ def test_config_file_parse(self):
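+        # A chained <backingStore> should be parsed recursively, with
+        # the terminating empty element mapping to None.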
+ xml = """<backingStore type='file'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/mid.qcow2'/>
+ <backingStore type='file'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/base.qcow2'/>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.driver_name, 'qemu')
+ self.assertEqual(obj.driver_format, 'qcow2')
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
+ self.assertEqual(obj.backing_store.driver_name, 'qemu')
+ self.assertEqual(obj.backing_store.source_type, 'file')
+ self.assertEqual(obj.backing_store.source_file,
+ '/var/lib/libvirt/images/base.qcow2')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+ def test_config_network_parse(self):
+ xml = """<backingStore type='network' index='1'>
+ <format type='qcow2'/>
+ <source protocol='gluster' name='volume1/img1'>
+ <host name='host1' port='24007'/>
+ </source>
+ <backingStore type='network' index='2'>
+ <format type='qcow2'/>
+ <source protocol='gluster' name='volume1/img2'>
+ <host name='host1' port='24007'/>
+ </source>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'network')
+ self.assertEqual(obj.source_protocol, 'gluster')
+ self.assertEqual(obj.source_name, 'volume1/img1')
+ self.assertEqual(obj.source_hosts[0], 'host1')
+ self.assertEqual(obj.source_ports[0], '24007')
+ self.assertEqual(obj.index, '1')
+ self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
+ self.assertEqual(obj.backing_store.index, '2')
+ self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
+ self.assertEqual(obj.backing_store.source_ports[0], '24007')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+
+class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
+
+ def test_config_mount(self):
+ obj = config.LibvirtConfigGuestFilesys()
+ obj.source_type = "mount"
+ obj.source_dir = "/tmp/hello"
+ obj.target_dir = "/mnt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <filesystem type="mount">
+ <source dir="/tmp/hello"/>
+ <target dir="/mnt"/>
+ </filesystem>""")
+
+
+class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
+
+ def test_config_tablet(self):
+ obj = config.LibvirtConfigGuestInput()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <input type="tablet" bus="usb"/>""")
+
+
+class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
+
+ def test_config_graphics(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "vnc"
+ obj.autoport = True
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
+ """)
+
+
+class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
+
+ def test_config_pci_guest_host_dev(self):
+ obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
+ xml = obj.to_xml()
+ expected = """
+ <hostdev mode="subsystem" type="pci" managed="yes"/>
+ """
+ self.assertXmlEqual(xml, expected)
+
+ def test_parse_GuestHostdev(self):
+ xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>"""
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'pci')
+ self.assertEqual(obj.managed, 'yes')
+
+ def test_parse_GuestHostdev_non_pci(self):
+ xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>"""
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'usb')
+ self.assertEqual(obj.managed, 'no')
+
+
+class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
+
+ expected = """
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address bus="0x11" domain="0x1234" function="0x3"
+ slot="0x22" />
+ </source>
+ </hostdev>
+ """
+
+    def test_config_guest_hostdev_pci(self):
+ hostdev = config.LibvirtConfigGuestHostdevPCI()
+ hostdev.domain = "1234"
+ hostdev.bus = "11"
+ hostdev.slot = "22"
+ hostdev.function = "3"
+ xml = hostdev.to_xml()
+ self.assertXmlEqual(self.expected, xml)
+
+    def test_parse_guest_hostdev_pci(self):
+ xmldoc = self.expected
+ obj = config.LibvirtConfigGuestHostdevPCI()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'pci')
+ self.assertEqual(obj.managed, 'yes')
+ self.assertEqual(obj.domain, '0x1234')
+ self.assertEqual(obj.bus, '0x11')
+ self.assertEqual(obj.slot, '0x22')
+ self.assertEqual(obj.function, '0x3')
+
+    def test_parse_guest_hostdev_usb(self):
+ xmldoc = """<hostdev mode='subsystem' type='usb'>
+ <source startupPolicy='optional'>
+ <vendor id='0x1234'/>
+ <product id='0xbeef'/>
+ </source>
+ <boot order='2'/>
+ </hostdev>"""
+ obj = config.LibvirtConfigGuestHostdevPCI()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'usb')
+
+
+class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
+
+ def test_config_file(self):
+ obj = config.LibvirtConfigGuestSerial()
+ obj.type = "file"
+ obj.source_path = "/tmp/vm.log"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <serial type="file">
+ <source path="/tmp/vm.log"/>
+ </serial>""")
+
+ def test_config_serial_port(self):
+ obj = config.LibvirtConfigGuestSerial()
+ obj.type = "tcp"
+ obj.listen_port = 11111
+ obj.listen_host = "0.0.0.0"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <serial type="tcp">
+ <source host="0.0.0.0" service="11111" mode="bind"/>
+ </serial>""")
+
+
+class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
+ def test_config_pty(self):
+ obj = config.LibvirtConfigGuestConsole()
+ obj.type = "pty"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <console type="pty"/>""")
+
+
+class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
+ def test_config_spice_minimal(self):
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "spicevmc"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="spicevmc">
+ <target type='virtio'/>
+ </channel>""")
+
+ def test_config_spice_full(self):
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "spicevmc"
+ obj.target_name = "com.redhat.spice.0"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="spicevmc">
+ <target type='virtio' name='com.redhat.spice.0'/>
+ </channel>""")
+
+ def test_config_qga_full(self):
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "unix"
+ obj.target_name = "org.qemu.guest_agent.0"
+ obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % (
+ obj.target_name, "instance-name")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="unix">
+ <source path="%s" mode="bind"/>
+ <target type="virtio" name="org.qemu.guest_agent.0"/>
+ </channel>""" % obj.source_path)
+
+
+class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
+ def test_config_ethernet(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "ethernet"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "vnet0"
+ obj.driver_name = "vhost"
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="ethernet">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <driver name="vhost"/>
+ <target dev="vnet0"/>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
+ </interface>""")
+
+ def test_config_bridge(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "bridge"
+ obj.source_dev = "br0"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.filtername = "clean-traffic"
+ obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="bridge">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source bridge="br0"/>
+ <target dev="tap12345678"/>
+ <filterref filter="clean-traffic">
+ <parameter name="IP" value="192.168.122.1"/>
+ </filterref>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
+ </interface>""")
+
+ def test_config_bridge_ovs(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "bridge"
+ obj.source_dev = "br0"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.vporttype = "openvswitch"
+ obj.vportparams.append({"key": "instanceid", "value": "foobar"})
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="bridge">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source bridge="br0"/>
+ <target dev="tap12345678"/>
+ <virtualport type="openvswitch">
+ <parameters instanceid="foobar"/>
+ </virtualport>
+ </interface>""")
+
+ def test_config_8021Qbh(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "direct"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.source_dev = "eth0"
+ obj.vporttype = "802.1Qbh"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="direct">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source dev="eth0" mode="private"/>
+ <target dev="tap12345678"/>
+ <virtualport type="802.1Qbh"/>
+ </interface>""")
+
+ def test_config_direct(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "direct"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.source_dev = "eth0"
+ obj.source_mode = "passthrough"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="direct">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source dev="eth0" mode="passthrough"/>
+ </interface>""")
+
+ def test_config_8021Qbh_hostdev(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vporttype = "802.1Qbh"
+ obj.add_vport_param("profileid", "MyPortProfile")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="hostdev" managed="yes">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <source>
+ <address type="pci" domain="0x0000"
+ bus="0x0a" slot="0x00" function="0x1"/>
+ </source>
+ <virtualport type="802.1Qbh">
+ <parameters profileid="MyPortProfile"/>
+ </virtualport>
+ </interface>""")
+
+ def test_config_hw_veb_hostdev(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vlan = "100"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="hostdev" managed="yes">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <source>
+ <address type="pci" domain="0x0000"
+ bus="0x0a" slot="0x00" function="0x1"/>
+ </source>
+ <vlan>
+ <tag id="100"/>
+ </vlan>
+ </interface>""")
+
+
+class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
+
+ def test_config_lxc(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "lxc"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "exe"
+ obj.os_init_path = "/sbin/init"
+
+ fs = config.LibvirtConfigGuestFilesys()
+ fs.source_dir = "/root/lxc"
+ fs.target_dir = "/"
+
+ obj.add_device(fs)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="lxc">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>exe</type>
+ <init>/sbin/init</init>
+ </os>
+ <devices>
+ <filesystem type="mount">
+ <source dir="/root/lxc"/>
+ <target dir="/"/>
+ </filesystem>
+ </devices>
+ </domain>""")
+
+ def test_config_lxc_with_idmap(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "lxc"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "exe"
+ obj.os_init_path = "/sbin/init"
+
+ uidmap = config.LibvirtConfigGuestUIDMap()
+ uidmap.target = "10000"
+ uidmap.count = "1"
+ obj.idmaps.append(uidmap)
+ gidmap = config.LibvirtConfigGuestGIDMap()
+ gidmap.target = "10000"
+ gidmap.count = "1"
+ obj.idmaps.append(gidmap)
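+ # Only target and count are set here; 'start' is expected to default to
+ # 0 in the generated <idmap> entries asserted below.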
+
+ fs = config.LibvirtConfigGuestFilesys()
+ fs.source_dir = "/root/lxc"
+ fs.target_dir = "/"
+
+ obj.add_device(fs)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <domain type="lxc">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>exe</type>
+ <init>/sbin/init</init>
+ </os>
+ <devices>
+ <filesystem type="mount">
+ <source dir="/root/lxc"/>
+ <target dir="/"/>
+ </filesystem>
+ </devices>
+ <idmap>
+ <uid start="0" target="10000" count="1"/>
+ <gid start="0" target="10000" count="1"/>
+ </idmap>
+ </domain>""", xml)
+
+ def test_config_xen_pv(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "xen"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "linux"
+ obj.os_kernel = "/tmp/vmlinuz"
+ obj.os_initrd = "/tmp/ramdisk"
+ obj.os_cmdline = "console=xvc0"
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/xvda"
+ disk.target_bus = "xen"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="xen">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>linux</type>
+ <kernel>/tmp/vmlinuz</kernel>
+ <initrd>/tmp/ramdisk</initrd>
+ <cmdline>console=xvc0</cmdline>
+ </os>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="xen" dev="/dev/xvda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_xen_hvm(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "xen"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "hvm"
+ obj.os_loader = '/usr/lib/xen/boot/hvmloader'
+ obj.os_root = "root=xvda"
+ obj.os_cmdline = "console=xvc0"
+ obj.pae = True
+ obj.acpi = True
+ obj.apic = True
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/xvda"
+ disk.target_bus = "xen"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="xen">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>hvm</type>
+ <loader>/usr/lib/xen/boot/hvmloader</loader>
+ <cmdline>console=xvc0</cmdline>
+ <root>root=xvda</root>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="xen" dev="/dev/xvda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_kvm(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "kvm"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+
+ obj.cputune = config.LibvirtConfigGuestCPUTune()
+ obj.cputune.shares = 100
+ obj.cputune.quota = 50000
+ obj.cputune.period = 25000
+
+ obj.membacking = config.LibvirtConfigGuestMemoryBacking()
+ obj.membacking.hugepages = True
+
+ obj.memtune = config.LibvirtConfigGuestMemoryTune()
+ obj.memtune.hard_limit = 496
+ obj.memtune.soft_limit = 672
+ obj.memtune.swap_hard_limit = 1638
+ obj.memtune.min_guarantee = 2970
+
+ obj.numatune = config.LibvirtConfigGuestNUMATune()
+
+ numamemory = config.LibvirtConfigGuestNUMATuneMemory()
+ numamemory.mode = "preferred"
+ numamemory.nodeset = [0, 1, 2, 3, 8]
+
+ obj.numatune.memory = numamemory
+
+ numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode0.cellid = 0
+ numamemnode0.mode = "preferred"
+ numamemnode0.nodeset = [0, 1]
+
+ numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode1.cellid = 1
+ numamemnode1.mode = "preferred"
+ numamemnode1.nodeset = [2, 3]
+
+ numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode2.cellid = 2
+ numamemnode2.mode = "preferred"
+ numamemnode2.nodeset = [8]
+
+ obj.numatune.memnodes.extend([numamemnode0,
+ numamemnode1,
+ numamemnode2])
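+ # The nodeset lists are expected to be collapsed into libvirt range
+ # strings in the generated XML, e.g. [0, 1, 2, 3, 8] becomes "0-3,8".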
+
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "linux"
+ obj.os_boot_dev = ["hd", "cdrom", "fd"]
+ obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
+ obj.pae = True
+ obj.acpi = True
+ obj.apic = True
+
+ obj.sysinfo = config.LibvirtConfigGuestSysinfo()
+ obj.sysinfo.bios_vendor = "Acme"
+ obj.sysinfo.system_version = "1.0.0"
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/vda"
+ disk.target_bus = "virtio"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="kvm">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <memoryBacking>
+ <hugepages/>
+ </memoryBacking>
+ <memtune>
+ <hard_limit units="K">496</hard_limit>
+ <soft_limit units="K">672</soft_limit>
+ <swap_hard_limit units="K">1638</swap_hard_limit>
+ <min_guarantee units="K">2970</min_guarantee>
+ </memtune>
+ <numatune>
+ <memory mode="preferred" nodeset="0-3,8"/>
+ <memnode cellid="0" mode="preferred" nodeset="0-1"/>
+ <memnode cellid="1" mode="preferred" nodeset="2-3"/>
+ <memnode cellid="2" mode="preferred" nodeset="8"/>
+ </numatune>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <sysinfo type='smbios'>
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="version">1.0.0</entry>
+ </system>
+ </sysinfo>
+ <os>
+ <type>linux</type>
+ <boot dev="hd"/>
+ <boot dev="cdrom"/>
+ <boot dev="fd"/>
+ <smbios mode="sysinfo"/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cputune>
+ <shares>100</shares>
+ <quota>50000</quota>
+ <period>25000</period>
+ </cputune>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_machine_type(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "kvm"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "hvm"
+ obj.os_mach_type = "fake_machine_type"
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, """
+ <domain type="kvm">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu>2</vcpu>
+ <os>
+ <type machine="fake_machine_type">hvm</type>
+ </os>
+ </domain>""")
+
+ def test_ConfigGuest_parse_devices(self):
+ xmldoc = """ <domain type="kvm">
+ <devices>
+ <hostdev mode="subsystem" type="pci" managed="no">
+ </hostdev>
+ </devices>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+ self.assertEqual(len(obj.devices), 1)
+ self.assertIsInstance(obj.devices[0],
+ config.LibvirtConfigGuestHostdevPCI)
+ self.assertEqual(obj.devices[0].mode, 'subsystem')
+ self.assertEqual(obj.devices[0].managed, 'no')
+
+ def test_ConfigGuest_parse_devices_wrong_type(self):
+ xmldoc = """ <domain type="kvm">
+ <devices>
+ <hostdev mode="subsystem" type="xxxx" managed="no">
+ </hostdev>
+ </devices>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+ self.assertEqual(len(obj.devices), 0)
+
+ def test_ConfigGuest_parse_cpu(self):
+ xmldoc = """ <domain>
+ <cpu mode='custom' match='exact'>
+ <model>kvm64</model>
+ </cpu>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+
+ self.assertEqual(obj.cpu.mode, 'custom')
+ self.assertEqual(obj.cpu.match, 'exact')
+ self.assertEqual(obj.cpu.model, 'kvm64')
+
+
+class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
+
+ def test_config_snapshot(self):
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks/>
+ </domainsnapshot>""")
+
+ def test_config_snapshot_with_disks(self):
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ disk = config.LibvirtConfigGuestSnapshotDisk()
+ disk.name = 'vda'
+ disk.source_path = 'source-path'
+ disk.source_type = 'file'
+ disk.snapshot = 'external'
+ disk.driver_name = 'qcow2'
+ obj.add_disk(disk)
+
+ disk2 = config.LibvirtConfigGuestSnapshotDisk()
+ disk2.name = 'vdb'
+ disk2.snapshot = 'no'
+ obj.add_disk(disk2)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks>
+ <disk name='vda' snapshot='external' type='file'>
+ <source file='source-path'/>
+ </disk>
+ <disk name='vdb' snapshot='no'/>
+ </disks>
+ </domainsnapshot>""")
+
+ def test_config_snapshot_with_network_disks(self):
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ disk = config.LibvirtConfigGuestSnapshotDisk()
+ disk.name = 'vda'
+ disk.source_name = 'source-file'
+ disk.source_type = 'network'
+ disk.source_hosts = ['host1']
+ disk.source_ports = ['12345']
+ disk.source_protocol = 'glusterfs'
+ disk.snapshot = 'external'
+ disk.driver_name = 'qcow2'
+ obj.add_disk(disk)
+
+ disk2 = config.LibvirtConfigGuestSnapshotDisk()
+ disk2.name = 'vdb'
+ disk2.snapshot = 'no'
+ obj.add_disk(disk2)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks>
+ <disk name='vda' snapshot='external' type='network'>
+ <source protocol='glusterfs' name='source-file'>
+ <host name='host1' port='12345'/>
+ </source>
+ </disk>
+ <disk name='vdb' snapshot='no'/>
+ </disks>
+ </domainsnapshot>""")
+
+
+class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
+
+ def test_config_virt_usb_device(self):
+ xmlin = """
+ <device>
+ <name>usb_0000_09_00_0</name>
+ <parent>pci_0000_00_1c_0</parent>
+ <driver>
+ <name>vxge</name>
+ </driver>
+ <capability type="usb">
+ <domain>0</domain>
+ <capability type="fake_usb">
+ <address fake_usb="fake"/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsNone(obj.pci_capability)
+
+ def test_config_virt_device(self):
+ xmlin = """
+ <device>
+ <name>pci_0000_09_00_0</name>
+ <parent>pci_0000_00_1c_0</parent>
+ <driver>
+ <name>vxge</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+ self.assertEqual(len(obj.pci_capability.fun_capability[0].
+ device_addrs),
+ 3)
+ self.assertEqual(obj.pci_capability.bus, 9)
+
+ def test_config_phy_device(self):
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="phys_function">
+ <address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "phys_function")
+ self.assertEqual(len(obj.pci_capability.fun_capability[0].
+ device_addrs),
+ 1)
+
+ def test_config_non_device(self):
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions"/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+
+ def test_config_fail_device(self):
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions">
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+
+ def test_config_2cap_device(self):
+ xmlin = """
+ <device>
+ <name>pci_0000_04_10_7</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igbvf</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>7</function>
+ <product id='0x1520'>I350 Ethernet Controller Virtual</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ <capability type='virt_functions'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "phys_function")
+ self.assertEqual(obj.pci_capability.fun_capability[1].type,
+ "virt_functions")
+
+
+class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
+
+ def test_config_device_pci_cap(self):
+ xmlin = """
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>10</bus>
+ <slot>1</slot>
+ <function>5</function>
+ <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
+ <vendor id="0x8086">Intel Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>
+ </capability>"""
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(obj.domain, 0)
+ self.assertEqual(obj.bus, 10)
+ self.assertEqual(obj.slot, 1)
+ self.assertEqual(obj.function, 5)
+ self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
+ self.assertEqual(obj.product_id, 0x10bd)
+ self.assertEqual(obj.vendor, "Intel Inc.")
+ self.assertEqual(obj.vendor_id, 0x8086)
+ self.assertIsInstance(obj.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+
+ self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
+ self.assertEqual(obj.fun_capability[0].device_addrs,
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
+
+ def test_config_device_pci_2cap(self):
+ xmlin = """
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>10</bus>
+ <slot>1</slot>
+ <function>5</function>
+ <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
+ <vendor id="0x8086">Intel Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>
+ <capability type="phys_function">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ </capability>
+ </capability>"""
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(obj.domain, 0)
+ self.assertEqual(obj.bus, 10)
+ self.assertEqual(obj.slot, 1)
+ self.assertEqual(obj.function, 5)
+ self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
+ self.assertEqual(obj.product_id, 0x10bd)
+ self.assertEqual(obj.vendor, "Intel Inc.")
+ self.assertEqual(obj.vendor_id, 0x8086)
+ self.assertIsInstance(obj.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+
+ self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
+ self.assertEqual(obj.fun_capability[0].device_addrs,
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
+ self.assertEqual(obj.fun_capability[1].type, 'phys_function')
+ self.assertEqual(obj.fun_capability[1].device_addrs,
+ [(0, 10, 1, 1), ])
+
+ def test_config_read_only_disk(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "disk"
+ obj.source_device = "disk"
+ obj.driver_name = "kvm"
+ obj.target_dev = "/dev/hdc"
+ obj.target_bus = "virtio"
+ obj.readonly = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="disk" device="disk">
+ <driver name="kvm"/>
+ <target bus="virtio" dev="/dev/hdc"/>
+ <readonly/>
+ </disk>""")
+
+ obj.readonly = False
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="disk" device="disk">
+ <driver name="kvm"/>
+ <target bus="virtio" dev="/dev/hdc"/>
+ </disk>""")
+
+
+class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
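+ # The <address> sub-elements are expected to be parsed into
+ # (domain, bus, slot, function) tuples of integers.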
+
+ def test_config_device_pci_subfunction(self):
+ xmlin = """
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>"""
+ fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap()
+ fun_capability.parse_str(xmlin)
+ self.assertEqual('virt_functions', fun_capability.type)
+ self.assertEqual([(0, 10, 1, 1),
+ (1, 10, 2, 3)],
+ fun_capability.device_addrs)
+
+
+class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest):
+
+ def test_config_video_driver(self):
+ obj = config.LibvirtConfigGuestVideo()
+ obj.type = 'qxl'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <video>
+ <model type='qxl'/>
+ </video>""")
+
+ def test_config_video_driver_vram_heads(self):
+ obj = config.LibvirtConfigGuestVideo()
+ obj.type = 'qxl'
+ obj.vram = '9216'
+ obj.heads = '1'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <video>
+ <model type='qxl' vram='9216' heads='1'/>
+ </video>""")
+
+
+class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest):
+
+ def test_config_seclabel_config(self):
+ obj = config.LibvirtConfigSeclabel()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <seclabel type='dynamic'/>""")
+
+ def test_config_seclabel_baselabel(self):
+ obj = config.LibvirtConfigSeclabel()
+ obj.type = 'dynamic'
+ obj.baselabel = 'system_u:system_r:my_svirt_t:s0'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <seclabel type='dynamic'>
+ <baselabel>system_u:system_r:my_svirt_t:s0</baselabel>
+ </seclabel>""")
+
+
+class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest):
+
+ def test_config_rng_driver(self):
+ obj = config.LibvirtConfigGuestRng()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+<rng model='virtio'>
+ <backend model='random'/>
+</rng>""")
+
+ def test_config_rng_driver_with_rate(self):
+ obj = config.LibvirtConfigGuestRng()
+ obj.backend = '/dev/random'
+ obj.rate_period = '12'
+ obj.rate_bytes = '34'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+<rng model='virtio'>
+ <rate period='12' bytes='34'/>
+ <backend model='random'>/dev/random</backend>
+</rng>""")
+
+
+class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest):
+
+ def test_config_guest_controller(self):
+ obj = config.LibvirtConfigGuestController()
+ obj.type = 'scsi'
+ obj.index = 0
+ obj.model = 'virtio-scsi'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <controller type='scsi' index='0' model='virtio-scsi'/>""")
+
+
+class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest):
+ def test_config_watchdog(self):
+ obj = config.LibvirtConfigGuestWatchdog()
+ obj.action = 'none'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='none'/>")
+
+ def test_config_watchdog_default_action(self):
+ obj = config.LibvirtConfigGuestWatchdog()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='reset'/>")
+
+
+class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest):
+
+ def test_config_cputune_timeslice(self):
+ cputune = config.LibvirtConfigGuestCPUTune()
+ cputune.shares = 100
+ cputune.quota = 50000
+ cputune.period = 25000
+
+ xml = cputune.to_xml()
+ self.assertXmlEqual(xml, """
+ <cputune>
+ <shares>100</shares>
+ <quota>50000</quota>
+ <period>25000</period>
+ </cputune>""")
+
+ def test_config_cputune_vcpus(self):
+ cputune = config.LibvirtConfigGuestCPUTune()
+
+ vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu0.id = 0
+ vcpu0.cpuset = set([0, 1])
+ vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu1.id = 1
+ vcpu1.cpuset = set([2, 3])
+ vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu2.id = 2
+ vcpu2.cpuset = set([4, 5])
+ vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu3.id = 3
+ vcpu3.cpuset = set([6, 7])
+ cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3])
+
+ xml = cputune.to_xml()
+ self.assertXmlEqual(xml, """
+ <cputune>
+ <vcpupin vcpu="0" cpuset="0-1"/>
+ <vcpupin vcpu="1" cpuset="2-3"/>
+ <vcpupin vcpu="2" cpuset="4-5"/>
+ <vcpupin vcpu="3" cpuset="6-7"/>
+ </cputune>""")
+
+
+class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
+ def test_config_memory_backing_none(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<memoryBacking/>")
+
+ def test_config_memory_backing_all(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+ obj.locked = True
+ obj.sharedpages = False
+ obj.hugepages = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <memoryBacking>
+ <hugepages/>
+ <nosharedpages/>
+ <locked/>
+ </memoryBacking>""")
+
+
+class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
+ def test_config_memory_tune_none(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<memtune/>")
+
+ def test_config_memory_tune_all(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+ obj.soft_limit = 6
+ obj.hard_limit = 28
+ obj.swap_hard_limit = 140
+ obj.min_guarantee = 270
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <memtune>
+ <hard_limit units="K">28</hard_limit>
+ <soft_limit units="K">6</soft_limit>
+ <swap_hard_limit units="K">140</swap_hard_limit>
+ <min_guarantee units="K">270</min_guarantee>
+ </memtune>""")
+
+
+class LibvirtConfigGuestNUMATuneTest(LibvirtConfigBaseTest):
+ def test_config_numa_tune_none(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<numatune/>", xml)
+
+ def test_config_numa_tune_memory(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ numamemory = config.LibvirtConfigGuestNUMATuneMemory()
+ numamemory.nodeset = [0, 1, 2, 3, 8]
+
+ obj.memory = numamemory
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <numatune>
+ <memory mode="strict" nodeset="0-3,8"/>
+ </numatune>""", xml)
+
+ def test_config_numa_tune_memnodes(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode0.cellid = 0
+ numamemnode0.nodeset = [0, 1]
+
+ numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode1.cellid = 1
+ numamemnode1.nodeset = [2, 3]
+
+ numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode2.cellid = 2
+ numamemnode2.nodeset = [8]
+
+ obj.memnodes.extend([numamemnode0,
+ numamemnode1,
+ numamemnode2])
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <numatune>
+ <memnode cellid="0" mode="strict" nodeset="0-1"/>
+ <memnode cellid="1" mode="strict" nodeset="2-3"/>
+ <memnode cellid="2" mode="strict" nodeset="8"/>
+ </numatune>""", xml)
+
+
+class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
+
+ def test_config_metadata(self):
+ meta = config.LibvirtConfigGuestMetaNovaInstance()
+ meta.package = "2014.2.3"
+ meta.name = "moonbuggy"
+ meta.creationTime = 1234567890
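+ # 1234567890 is 2009-02-13 23:31:30 UTC, which is the formatted value
+ # expected in <nova:creationTime> below.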
+ meta.roottype = "image"
+ meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"
+
+ owner = config.LibvirtConfigGuestMetaNovaOwner()
+ owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
+ owner.username = "buzz"
+ owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
+ owner.projectname = "moonshot"
+
+ meta.owner = owner
+
+ flavor = config.LibvirtConfigGuestMetaNovaFlavor()
+ flavor.name = "m1.lowgravity"
+ flavor.vcpus = 8
+ flavor.memory = 2048
+ flavor.swap = 10
+ flavor.disk = 50
+ flavor.ephemeral = 10
+
+ meta.flavor = flavor
+
+ xml = meta.to_xml()
+ self.assertXmlEqual(xml, """
+ <nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'>
+ <nova:package version="2014.2.3"/>
+ <nova:name>moonbuggy</nova:name>
+ <nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
+ <nova:flavor name="m1.lowgravity">
+ <nova:memory>2048</nova:memory>
+ <nova:disk>50</nova:disk>
+ <nova:swap>10</nova:swap>
+ <nova:ephemeral>10</nova:ephemeral>
+ <nova:vcpus>8</nova:vcpus>
+ </nova:flavor>
+ <nova:owner>
+ <nova:user
+ uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
+ <nova:project
+ uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
+ </nova:owner>
+ <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
+ </nova:instance>
+ """)
+
+
+class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest):
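+ # The start/target/count attributes are parsed as integers, so
+ # non-numeric values are expected to raise ValueError.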
+ def test_config_id_map_parse_start_not_int(self):
+ xmlin = "<uid start='a' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_id_map_parse_target_not_int(self):
+ xmlin = "<uid start='2' target='a' count='5'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_id_map_parse_count_not_int(self):
+ xmlin = "<uid start='2' target='20000' count='a'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_uid_map(self):
+ obj = config.LibvirtConfigGuestUIDMap()
+ obj.start = 1
+ obj.target = 10000
+ obj.count = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<uid start='1' target='10000' count='2'/>", xml)
+
+ def test_config_uid_map_parse(self):
+ xmlin = "<uid start='2' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestUIDMap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(2, obj.start)
+ self.assertEqual(20000, obj.target)
+ self.assertEqual(5, obj.count)
+
+ def test_config_gid_map(self):
+ obj = config.LibvirtConfigGuestGIDMap()
+ obj.start = 1
+ obj.target = 10000
+ obj.count = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<gid start='1' target='10000' count='2'/>", xml)
+
+ def test_config_gid_map_parse(self):
+ xmlin = "<gid start='2' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestGIDMap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(2, obj.start)
+ self.assertEqual(20000, obj.target)
+ self.assertEqual(5, obj.count)
+
+
+class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest):
+
+ def test_config_memory_balloon_period(self):
+ balloon = config.LibvirtConfigMemoryBalloon()
+ balloon.model = 'fake_virtio'
+ balloon.period = 11
+
+ xml = balloon.to_xml()
+ expected_xml = """
+ <memballoon model='fake_virtio'>
+ <stats period='11'/>
+ </memballoon>"""
+
+ self.assertXmlEqual(expected_xml, xml)
diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
new file mode 100644
index 0000000000..649144c0d1
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt.libvirt import designer
+
+
+class DesignerTestCase(test.NoDBTestCase):
+ def test_set_vif_bandwidth_config_no_extra_specs(self):
+ # Test that set_vif_bandwidth_config() does not raise when its second
+ # parameter has no 'extra_specs' field.
+
+ try:
+ # The conf will never be used, so we can pass 'None'.
+ # An empty dictionary is fine: all that matters is that there is no
+ # 'extra_specs' field.
+ designer.set_vif_bandwidth_config(None, {})
+ except KeyError as e:
+ self.fail('KeyError: %s' % e)
diff --git a/nova/tests/unit/virt/libvirt/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
new file mode 100644
index 0000000000..02efbe10b5
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import test
+from nova import utils
+from nova.virt.libvirt import dmcrypt
+
+
+class LibvirtDmcryptTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LibvirtDmcryptTestCase, self).setUp()
+
+ self.CIPHER = 'cipher'
+ self.KEY_SIZE = 256
+ self.NAME = 'disk'
+ self.TARGET = dmcrypt.volume_name(self.NAME)
+ self.PATH = '/dev/nova-lvm/instance_disk'
+ self.KEY = range(0, self.KEY_SIZE)
+ self.KEY_STR = ''.join(["%02x" % x for x in range(0, self.KEY_SIZE)])
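+ # KEY_STR is the hex encoding of KEY; it is the value the tests expect
+ # to be written to cryptsetup's stdin via the process_input kwarg.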
+
+ self.executes = []
+ self.kwargs = {}
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ self.kwargs = kwargs
+ return None, None
+
+ def fake_listdir(path):
+ return [self.TARGET, '/dev/mapper/disk']
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os, 'listdir', fake_listdir)
+
+ def test_create_volume(self):
+ expected_commands = [('cryptsetup',
+ 'create',
+ self.TARGET,
+ self.PATH,
+ '--cipher=' + self.CIPHER,
+ '--key-size=' + str(self.KEY_SIZE),
+ '--key-file=-')]
+ dmcrypt.create_volume(self.TARGET, self.PATH, self.CIPHER,
+ self.KEY_SIZE, self.KEY)
+
+ self.assertEqual(expected_commands, self.executes)
+ self.assertEqual(self.KEY_STR, self.kwargs['process_input'])
+
+ def test_delete_volume(self):
+ expected_commands = [('cryptsetup', 'remove', self.TARGET)]
+ dmcrypt.delete_volume(self.TARGET)
+
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_list_volumes(self):
+ encrypted_volumes = dmcrypt.list_volumes()
+
+ self.assertEqual([self.TARGET], encrypted_volumes)
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
new file mode 100644
index 0000000000..90e25e1b3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -0,0 +1,12576 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import contextlib
+import copy
+import datetime
+import errno
+import os
+import random
+import re
+import shutil
+import threading
+import time
+import uuid
+
+import eventlet
+from eventlet import greenthread
+import fixtures
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import encodeutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import arch
+from nova.compute import manager
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_mode
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import fileutils
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_pci_device
+from nova.tests.unit.virt.libvirt import fake_imagebackend
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova import version
+from nova.virt import block_device as driver_block_device
+from nova.virt import configdrive
+from nova.virt.disk import api as disk
+from nova.virt import driver
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import firewall as base_firewall
+from nova.virt import hardware
+from nova.virt import images
+from nova.virt.libvirt import blockinfo
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import firewall
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+from nova.virt.libvirt import utils as libvirt_utils
+
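+# Use the real libvirt python bindings when they are installed; otherwise
+# fall back to the fakelibvirt stub bundled with the unit tests.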
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+libvirt_driver.libvirt = libvirt
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+
+_fake_NodeDevXml = \
+ {"pci_0000_04_00_3": """
+ <device>
+ <name>pci_0000_04_00_3</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igb</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x1521'>I350 Gigabit Network Connection</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='virt_functions'>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
+ </capability>
+ </capability>
+ </device>""",
+ "pci_0000_04_10_7": """
+ <device>
+ <name>pci_0000_04_10_7</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igbvf</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>7</function>
+ <product id='0x1520'>I350 Ethernet Controller Virtual Function</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ <capability type='virt_functions'>
+ </capability>
+ </capability>
+ </device>"""}
+
+
+def _concurrency(signal, wait, done, target, is_block_dev=False):
+ signal.send()
+ wait.wait()
+ done.send()
+
+
+class FakeVirDomainSnapshot(object):
+
+ def __init__(self, dom=None):
+ self.dom = dom
+
+ def delete(self, flags):
+ pass
+
+
+class FakeVirtDomain(object):
+
+ def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
+ if uuidstr is None:
+ uuidstr = str(uuid.uuid4())
+ self.uuidstr = uuidstr
+ self.id = id
+ self.domname = name
+ self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
+ None, None]
+ if fake_xml:
+ self._fake_dom_xml = fake_xml
+ else:
+ self._fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+
+ def name(self):
+ if self.domname is None:
+ return "fake-domain %s" % self
+ else:
+ return self.domname
+
+ def ID(self):
+ return self.id
+
+ def info(self):
+ return self._info
+
+ def create(self):
+ pass
+
+ def managedSave(self, *args):
+ pass
+
+ def createWithFlags(self, launch_flags):
+ pass
+
+ def XMLDesc(self, *args):
+ return self._fake_dom_xml
+
+ def UUIDString(self):
+ return self.uuidstr
+
+ def attachDeviceFlags(self, xml, flags):
+ pass
+
+ def attachDevice(self, xml):
+ pass
+
+ def detachDeviceFlags(self, xml, flags):
+ pass
+
+ def snapshotCreateXML(self, xml, flags):
+ pass
+
+ def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
+ pass
+
+ def blockRebase(self, disk, base, bandwidth=0, flags=0):
+ pass
+
+ def blockJobInfo(self, path, flags):
+ pass
+
+ def resume(self):
+ pass
+
+ def destroy(self):
+ pass
+
+
+class CacheConcurrencyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(CacheConcurrencyTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+
+ # utils.synchronized() will create the lock_path for us if it
+ # doesn't already exist. It will also delete it when it's done,
+ # which can cause race conditions with the multiple threads we
+ # use for tests. So, create the path here so utils.synchronized()
+ # won't delete it out from under one of the threads.
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
+ fileutils.ensure_tree(self.lock_path)
+
+ def fake_exists(fname):
+ basedir = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name)
+ if fname == basedir or fname == self.lock_path:
+ return True
+ return False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_extend(image, size, use_cow=False):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def test_same_fname_concurrency(self):
+ # Ensures that concurrent caching of the same fname runs sequentially.
+ uuid = uuidutils.generate_uuid()
+
+ backend = imagebackend.Backend(False)
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig2, wait=wait2, done=done2)
+
+ wait2.send()
+ eventlet.sleep(0)
+ try:
+ self.assertFalse(done2.ready())
+ finally:
+ wait1.send()
+ done1.wait()
+ eventlet.sleep(0)
+ self.assertTrue(done2.ready())
+ # Wait on greenthreads to assert they didn't raise exceptions
+ # during execution
+ thr1.wait()
+ thr2.wait()
+
+ def test_different_fname_concurrency(self):
+ # Ensures that two different fname caches are concurrent.
+ uuid = uuidutils.generate_uuid()
+
+ backend = imagebackend.Backend(False)
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname2', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname1', None,
+ signal=sig2, wait=wait2, done=done2)
+ eventlet.sleep(0)
+ # Wait for thread 2 to start.
+ sig2.wait()
+
+ wait2.send()
+ tries = 0
+ while not done2.ready() and tries < 10:
+ eventlet.sleep(0)
+ tries += 1
+ try:
+ self.assertTrue(done2.ready())
+ finally:
+ wait1.send()
+ eventlet.sleep(0)
+ # Wait on greenthreads to assert they didn't raise exceptions
+ # during execution
+ thr1.wait()
+ thr2.wait()
+
+
+class FakeVolumeDriver(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def attach_volume(self, *args):
+ pass
+
+ def detach_volume(self, *args):
+ pass
+
+ def get_xml(self, *args):
+ return ""
+
+ def get_config(self, *args):
+ """Connect the volume to a fake device."""
+ conf = vconfig.LibvirtConfigGuestDisk()
+ conf.source_type = "network"
+ conf.source_protocol = "fake"
+ conf.source_name = "fake"
+ conf.target_dev = "fake"
+ conf.target_bus = "fake"
+ return conf
+
+ def connect_volume(self, *args):
+ """Connect the volume to a fake device."""
+ return self.get_config()
+
+
+class FakeConfigGuestDisk(object):
+ def __init__(self, *args, **kwargs):
+ self.source_type = None
+ self.driver_cache = None
+
+
+class FakeConfigGuest(object):
+ def __init__(self, *args, **kwargs):
+ self.driver_cache = None
+
+
+class FakeNodeDevice(object):
+ def __init__(self, fakexml):
+ self.xml = fakexml
+
+ def XMLDesc(self, *args):
+ return self.xml
+
+
+class LibvirtConnTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(LibvirtConnTestCase, self).setUp()
+ self.flags(fake_call=True)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ temp_dir = self.useFixture(fixtures.TempDir()).path
+ self.flags(instances_path=temp_dir)
+ self.flags(snapshots_directory=temp_dir, group='libvirt')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ # Force libvirt to return a host UUID that matches the serial in
+ # nova.tests.unit.virt.libvirt.fakelibvirt. This is necessary because
+ # the host UUID returned by libvirt becomes the serial whose value is
+ # checked for in test_xml_and_uri_* below.
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid',
+ lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
+ # Prevent test suite trying to find /etc/machine-id
+ # which isn't guaranteed to exist. Instead it will use
+ # the host UUID from libvirt which we mock above
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def fake_extend(image, size, use_cow=False):
+ pass
+
+ self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
+
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
+ class FakeConn():
+ def baselineCPU(self, cpu, flag):
+ """Add new libvirt API."""
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ <feature policy='require' name='hypervisor'/>
+ </cpu>"""
+
+ def getCapabilities(self):
+ """Ensure standard capabilities being returned."""
+ return """<capabilities>
+ <host><cpu><arch>x86_64</arch>
+ <feature policy='require' name='hypervisor'/>
+ </cpu></host>
+ </capabilities>"""
+
+ def getVersion(self):
+ return 1005001
+
+ def getLibVersion(self):
+ return (0 * 1000 * 1000) + (9 * 1000) + 11
+
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
+ def registerCloseCallback(self, cb, opaque):
+ pass
+
+ def nwfilterDefineXML(self, *args, **kwargs):
+ pass
+
+ def nodeDeviceLookupByName(self, x):
+ pass
+
+ def listDevices(self, cap, flags):
+ return []
+
+ def lookupByName(self, name):
+ pass
+
+ def getHostname(self):
+ return "mustard"
+
+ def getType(self):
+ return "QEMU"
+
+ def numOfDomains(self):
+ return 0
+
+ def listDomainsID(self):
+ return []
+
+ def listDefinedDomains(self):
+ return []
+
+ def getInfo(self):
+ return [arch.X86_64, 123456, 2, 2000,
+ 2, 1, 1, 1]
+
+ self.conn = FakeConn()
+ self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
+ lambda *a, **k: self.conn)
+
+ sys_meta = {
+ 'instance_type_memory_mb': 2048,
+ 'instance_type_swap': 0,
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_root_gb': 1,
+ 'instance_type_id': 2,
+ 'instance_type_name': u'm1.small',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': u'1',
+ 'instance_type_vcpus': 1
+ }
+
+ self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
+ self.stubs)
+ self.test_instance = {
+ 'id': 1,
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'display_name': "Acme webserver",
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': '5', # m1.small
+ 'extra_specs': {},
+ 'system_metadata': sys_meta,
+ 'pci_devices': objects.PciDeviceList(),
+ 'numa_topology': None,
+ 'config_drive': None,
+ 'vm_mode': None,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'os_type': 'linux',
+ 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
+ 'ephemeral_key_uuid': None,
+ }
+
+ def relpath(self, path):
+ return os.path.relpath(path, CONF.instances_path)
+
+ def tearDown(self):
+ nova.tests.unit.image.fake.FakeImageService_reset()
+ super(LibvirtConnTestCase, self).tearDown()
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtDriver(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtDriver(object):
+ def defineXML(self, xml):
+ return FakeVirtDomain()
+
+ # Creating mocks
+ volume_driver = ('iscsi=nova.tests.unit.virt.libvirt.test_driver'
+ '.FakeVolumeDriver')
+ self.flags(volume_drivers=[volume_driver],
+ group='libvirt')
+ fake = FakeLibvirtDriver()
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
+
+ def fake_lookup(self, instance_name):
+ return FakeVirtDomain()
+
+ def fake_execute(self, *args, **kwargs):
+ open(args[-1], "a").close()
+
+ def _create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'disabled': kwargs.get('disabled', False),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0}
+
+ return objects.Service(**service_ref)
+
+ def _get_launch_flags(self, conn, network_info, power_on=True,
+ vifs_already_plugged=False):
+ timeout = CONF.vif_plugging_timeout
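+ # Mirror the driver's launch logic: the domain is only expected to be
+ # started paused (VIR_DOMAIN_START_PAUSED) when neutron vif-plugged
+ # events will be waited for.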
+
+ events = []
+ if (conn._conn_supports_start_paused and
+ utils.is_neutron() and
+ not vifs_already_plugged and
+ power_on and timeout):
+ events = conn._get_neutron_events(network_info)
+
+ launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
+
+ return launch_flags
+
+ def test_public_api_signatures(self):
+ baseinst = driver.ComputeDriver(None)
+ inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertPublicAPISignatures(baseinst, inst)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_ok(self, mock_version):
+ mock_version.return_value = True
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_abort(self, mock_version):
+ mock_version.return_value = False
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable(self, mock_svc):
+ # Tests disabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable(self, mock_svc):
+ # Tests enabling a disabled host: the service stays disabled because
+ # it was not automatically disabled by the libvirt driver itself.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
+ # Tests enabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=False, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertFalse(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
+ # Tests disabling a disabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
+ def test_set_host_enabled_swallows_exceptions(self):
+ # Tests that set_host_enabled will swallow exceptions coming from the
+ # db_api code so they don't break anything calling it, e.g. the
+ # _get_new_connection method.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
+ # Make db.service_get_by_compute_host raise NovaException; this
+ # is more robust than just raising ComputeHostNotFound.
+ db_mock.side_effect = exception.NovaException
+ conn._set_host_enabled(False)
+
+ def test_prepare_pci_device(self):
+
+ pci_devices = [dict(hypervisor_name='xxx')]
+
+ self.flags(virt_type='xen', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ class FakeDev():
+ def attach(self):
+ pass
+
+ def dettach(self):
+ pass
+
+ def reset(self):
+ pass
+
+ self.mox.StubOutWithMock(self.conn, 'nodeDeviceLookupByName')
+ self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
+ self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
+ self.mox.ReplayAll()
+ conn._prepare_pci_devices_for_use(pci_devices)
+
+ def test_prepare_pci_device_exception(self):
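+ # A libvirtError raised while preparing a device should surface as
+ # PciDevicePrepareFailed.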
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid')]
+
+ self.flags(virt_type='xen', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ class FakeDev():
+
+ def attach(self):
+ pass
+
+ def dettach(self):
+ raise libvirt.libvirtError("xxxxx")
+
+ def reset(self):
+ pass
+
+ self.stubs.Set(self.conn, 'nodeDeviceLookupByName',
+ lambda x: FakeDev())
+ self.assertRaises(exception.PciDevicePrepareFailed,
+ conn._prepare_pci_devices_for_use, pci_devices)
+
+ def test_detach_pci_devices_exception(self):
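+ # Without a new enough libvirt, detaching PCI devices is not
+ # supported and PciDeviceDetachFailed should be raised.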
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid')]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: False
+
+ self.assertRaises(exception.PciDeviceDetachFailed,
+ conn._detach_pci_devices, None, pci_devices)
+
+ def test_detach_pci_devices(self):
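+ # _detach_pci_devices() should call detachDeviceFlags() and treat
+ # the detach as complete once the device no longer appears in the
+ # domain XML.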
+
+ fake_domXML1 =\
+ """<domain> <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none'/>
+ <source file='xxx'/>
+ <target dev='vda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x04' function='0x0'/>
+ </disk>
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address function="0x1" slot="0x10" domain="0x0000"
+ bus="0x04"/>
+ </source>
+ </hostdev></devices></domain>"""
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid',
+ address="0001:04:10:1")]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_get_guest_pci_device')
+
+ class FakeDev():
+ def to_xml(self):
+ pass
+
+ libvirt_driver.LibvirtDriver._get_guest_pci_device =\
+ lambda x, y: FakeDev()
+
+ class FakeDomain():
+ def detachDeviceFlags(self, xml, flag):
+ pci_devices[0]['hypervisor_name'] = 'marked'
+
+ def XMLDesc(self, flag):
+ return fake_domXML1
+
+ conn._detach_pci_devices(FakeDomain(), pci_devices)
+ self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
+
+ def test_detach_pci_devices_timeout(self):
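+ # If the device never disappears from the domain XML, the detach
+ # should time out and raise PciDeviceDetachFailed.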
+
+ fake_domXML1 =\
+ """<domain>
+ <devices>
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
+ </source>
+ </hostdev>
+ </devices>
+ </domain>"""
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid',
+ address="0000:04:10:1")]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_get_guest_pci_device')
+
+ class FakeDev():
+ def to_xml(self):
+ pass
+
+ libvirt_driver.LibvirtDriver._get_guest_pci_device =\
+ lambda x, y: FakeDev()
+
+ class FakeDomain():
+ def detachDeviceFlags(self, xml, flag):
+ pass
+
+ def XMLDesc(self, flag):
+ return fake_domXML1
+ self.assertRaises(exception.PciDeviceDetachFailed,
+ conn._detach_pci_devices, FakeDomain(), pci_devices)
+
+ def test_get_connector(self):
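+ # get_volume_connector() should report the host's IP, hostname,
+ # iSCSI initiator and FC WWPNs/WWNNs.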
+ initiator = 'fake.initiator.iqn'
+ ip = 'fakeip'
+ host = 'fakehost'
+ wwpns = ['100010604b019419']
+ wwnns = ['200010604b019419']
+ self.flags(my_ip=ip)
+ self.flags(host=host)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ expected = {
+ 'ip': ip,
+ 'initiator': initiator,
+ 'host': host,
+ 'wwpns': wwpns,
+ 'wwnns': wwnns
+ }
+ volume = {
+ 'id': 'fake'
+ }
+ result = conn.get_volume_connector(volume)
+ self.assertThat(expected, matchers.DictMatches(result))
+
+ def test_lifecycle_event_registration(self):
+ calls = []
+
+ def fake_registerErrorHandler(*args, **kwargs):
+ calls.append('fake_registerErrorHandler')
+
+ def fake_get_host_capabilities(**args):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.ARMV7
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ calls.append('fake_get_host_capabilities')
+ return caps
+
+ @mock.patch.object(libvirt, 'registerErrorHandler',
+ side_effect=fake_registerErrorHandler)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ side_effect=fake_get_host_capabilities)
+ def test_init_host(get_host_capabilities, register_error_handler):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn.init_host("test_host")
+
+ test_init_host()
+ # NOTE(dkliban): Will fail if get_host_capabilities is called before
+ # registerErrorHandler
+ self.assertEqual(['fake_registerErrorHandler',
+ 'fake_get_host_capabilities'], calls)
+
+ @mock.patch.object(libvirt_driver, 'LOG')
+ def test_connect_auth_cb_exception(self, log_mock):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ creds = dict(authname='nova', password='verybadpass')
+ self.assertRaises(exception.NovaException,
+ conn._connect_auth_cb, creds, False)
+ self.assertEqual(0, len(log_mock.method_calls),
+ 'LOG should not be used in _connect_auth_cb.')
+
+ def test_sanitize_log_to_xml(self):
+ # setup fake data
+ data = {'auth_password': 'scrubme'}
+ bdm = [{'connection_info': {'data': data}}]
+ bdi = {'block_device_mapping': bdm}
+
+ # Tests that the parameters to the _get_guest_xml method
+ # are sanitized for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.assertNotIn('scrubme', args[0])
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conf = mock.Mock()
+ with contextlib.nested(
+ mock.patch.object(libvirt_driver.LOG, 'debug',
+ side_effect=fake_debug),
+ mock.patch.object(conn, '_get_guest_config', return_value=conf)
+ ) as (
+ debug_mock, conf_mock
+ ):
+ conn._get_guest_xml(self.context, self.test_instance,
+ network_info={}, disk_info={},
+ image_meta={}, block_device_info=bdi)
+ # We don't care what the log message is; we just want to make sure
+ # our stub method is called, and it asserts the password is scrubbed.
+ self.assertTrue(debug_mock.called)
+
+ def test_close_callback(self):
+ self.close_callback = None
+
+ def set_close_callback(cb, opaque):
+ self.close_callback = cb
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=set_close_callback),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ # verify that the driver registers for the close callback
+ # and re-connects after receiving the callback
+ conn._get_connection()
+ self.assertFalse(service_mock.disabled)
+ self.assertTrue(self.close_callback)
+ conn._init_events_pipe()
+ self.close_callback(self.conn, 1, None)
+ conn._dispatch_events()
+
+ self.assertTrue(service_mock.disabled)
+ conn._get_connection()
+
+ def test_close_callback_bad_signature(self):
+ '''Validates that a connection to libvirt exists, even when the
+ registerCloseCallback method has a different number of arguments
+ in the libvirt python library.
+ '''
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=TypeError('dd')),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ connection = conn._get_connection()
+ self.assertTrue(connection)
+
+ def test_close_callback_not_defined(self):
+ '''Validates that a connection to libvirt exists, even when the
+ registerCloseCallback method is missing from the libvirt python
+ library.
+ '''
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=AttributeError('dd')),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ connection = conn._get_connection()
+ self.assertTrue(connection)
+
+ def test_cpu_features_bug_1217630(self):
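+ # The 'aes' CPU feature should only be reported when libvirt
+ # supports VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.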
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # Test an old version of libvirt; it shouldn't see the 'aes' feature
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+ caps = conn._get_host_capabilities()
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ # Test a new version of libvirt; it should find the 'aes' feature
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+ # Clear the capabilities cache first
+ conn._caps = None
+ caps = conn._get_host_capabilities()
+ self.assertIn('aes', [x.name for x in caps.host.cpu.features])
+
+ def test_cpu_features_are_not_duplicated(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # Test old version of libvirt. Should return single 'hypervisor'
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+ caps = conn._get_host_capabilities()
+ cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+ self.assertEqual(1, cnt)
+
+ # Test new version of libvirt. Should still return single 'hypervisor'
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+ # Clear the capabilities cache first
+ conn._caps = None
+ caps = conn._get_host_capabilities()
+ cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+ self.assertEqual(1, cnt)
+
+ def test_baseline_cpu_not_supported(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # `mock` has trouble stubbing attributes that don't exist yet, so
+ # fall back to plain-Python attribute setting/deleting
+ cap_str = 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'
+ if not hasattr(libvirt_driver.libvirt, cap_str):
+ setattr(libvirt_driver.libvirt, cap_str, True)
+ self.addCleanup(delattr, libvirt_driver.libvirt, cap_str)
+
+ # Handle just the NO_SUPPORT error
+ not_supported_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'this function is not supported by the connection driver:'
+ ' virConnectBaselineCPU',
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+
+ with mock.patch.object(conn._conn, 'baselineCPU',
+ side_effect=not_supported_exc):
+ caps = conn._get_host_capabilities()
+ self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ # Clear cached result so we can test again...
+ conn._caps = None
+
+ # Other errors should not be caught
+ other_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'other exc',
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+
+ with mock.patch.object(conn._conn, 'baselineCPU',
+ side_effect=other_exc):
+ self.assertRaises(libvirt.libvirtError,
+ conn._get_host_capabilities)
+
+ def test_lxc_get_host_capabilities_failed(self):
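+ # If baselineCPU fails (returns -1), capabilities should still be
+ # returned, just without the expanded CPU feature list.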
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ with mock.patch.object(conn._conn, 'baselineCPU', return_value=-1):
+ setattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', 1)
+ caps = conn._get_host_capabilities()
+ delattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
+ self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(time, "time")
+ def test_get_guest_config(self, time_mock, mock_flavor):
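+ # Verify the default guest config: the expected devices plus the
+ # Nova instance metadata (owner, flavor, creation time) embedded in
+ # the guest config.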
+ time_mock.return_value = 1234567.89
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+
+ ctxt = context.RequestContext(project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie")
+
+ flavor = objects.Flavor(name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={})
+ instance_ref = objects.Instance(**test_instance)
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info,
+ context=ctxt)
+
+ self.assertEqual(cfg.uuid, instance_ref["uuid"])
+ self.assertEqual(cfg.pae, False)
+ self.assertEqual(cfg.acpi, True)
+ self.assertEqual(cfg.apic, True)
+ self.assertEqual(cfg.memory, 6 * units.Ki)
+ self.assertEqual(cfg.vcpus, 28)
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_boot_dev, ["hd"])
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual(len(cfg.metadata), 1)
+ self.assertIsInstance(cfg.metadata[0],
+ vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes",
+ cfg.metadata[0].name)
+ self.assertEqual(1234567.89,
+ cfg.metadata[0].creationTime)
+ self.assertEqual("image",
+ cfg.metadata[0].roottype)
+ self.assertEqual(str(instance_ref["image_ref"]),
+ cfg.metadata[0].rootid)
+
+ self.assertIsInstance(cfg.metadata[0].owner,
+ vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(456,
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("pie",
+ cfg.metadata[0].owner.username)
+ self.assertEqual(123,
+ cfg.metadata[0].owner.projectid)
+ self.assertEqual("aubergine",
+ cfg.metadata[0].owner.projectname)
+
+ self.assertIsInstance(cfg.metadata[0].flavor,
+ vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small",
+ cfg.metadata[0].flavor.name)
+ self.assertEqual(6,
+ cfg.metadata[0].flavor.memory)
+ self.assertEqual(28,
+ cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496,
+ cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128,
+ cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336,
+ cfg.metadata[0].flavor.swap)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc(self, mock_flavor):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ None, {'mapping': {}})
+ self.assertEqual(instance_ref["uuid"], cfg.uuid)
+ self.assertEqual(2 * units.Mi, cfg.memory)
+ self.assertEqual(1, cfg.vcpus)
+ self.assertEqual(vm_mode.EXE, cfg.os_type)
+ self.assertEqual("/sbin/init", cfg.os_init_path)
+ self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(3, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestFilesys)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc_with_id_maps(self, mock_flavor):
+ self.flags(virt_type='lxc', group='libvirt')
+ self.flags(uid_maps=['0:1000:100'], group='libvirt')
+ self.flags(gid_maps=['0:1000:100'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ None, {'mapping': {}})
+ self.assertEqual(instance_ref["uuid"], cfg.uuid)
+ self.assertEqual(2 * units.Mi, cfg.memory)
+ self.assertEqual(1, cfg.vcpus)
+ self.assertEqual(vm_mode.EXE, cfg.os_type)
+ self.assertEqual("/sbin/init", cfg.os_init_path)
+ self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(3, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestFilesys)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+ self.assertEqual(len(cfg.idmaps), 2)
+ self.assertIsInstance(cfg.idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap)
+ self.assertIsInstance(cfg.idmaps[1],
+ vconfig.LibvirtConfigGuestGIDMap)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_fits(self, mock_flavor):
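+ # When the instance fits within a single host NUMA cell, the guest
+ # cpuset is limited to the chosen cell and no guest NUMA topology
+ # or cputune is generated.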
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ random, 'choice', side_effect=lambda cells: cells[0])):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(set([0, 1]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_no_fit(self, mock_flavor):
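+ # When the instance cannot fit into any host NUMA cell, placement
+ # falls back to the vcpu_pin_set and no cell is chosen.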
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([3])),
+ mock.patch.object(random, 'choice')
+ ) as (get_host_cap_mock,
+ get_vcpu_pin_set_mock, choice_mock):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertFalse(choice_mock.called)
+ self.assertEqual(set([3]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self,
+ mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
+ mock.patch.object(
+ random, 'choice', side_effect=lambda cells: cells[0])
+ ) as (has_min_version_mock, get_host_cap_mock,
+ get_vcpu_pin_set_mock, choice_mock):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ # NOTE(ndipanov): we make sure that pin_set was taken into account
+ # when choosing viable cells
+ choice_mock.assert_called_once_with([set([2, 3])])
+ self.assertEqual(set([2, 3]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_non_numa_host_instance_topo(self, mock_flavor):
+ instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+ hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([0]), 1024),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([2]), 1024)]))
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.numa_topology = instance_topology
+ flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ objects.InstanceNUMATopology, "get_by_instance_uuid",
+ return_value=instance_topology),
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps)):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertIsNone(cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNotNone(cfg.cpu.numa)
+ for instance_cell, numa_cfg_cell in zip(
+ instance_topology.cells, cfg.cpu.numa.cells):
+ self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+ self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+ self.assertEqual(instance_cell.memory * units.Ki,
+ numa_cfg_cell.memory)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_topo(self, mock_flavor):
+ instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+ hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([0, 1]), 1024),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([2, 3]),
+ 1024)]))
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.numa_topology = instance_topology
+ flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ objects.Flavor, "get_by_id", return_value=flavor),
+ mock.patch.object(
+ objects.InstanceNUMATopology, "get_by_instance_uuid",
+ return_value=instance_topology),
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 2]))
+ ):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertIsNone(cfg.cpuset)
+ # Test that the pinning is correct and limited to the allowed
+ # CPUs only
+ self.assertEqual(0, cfg.cputune.vcpupin[0].id)
+ self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[0].cpuset)
+ self.assertEqual(1, cfg.cputune.vcpupin[1].id)
+ self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[1].cpuset)
+ self.assertEqual(2, cfg.cputune.vcpupin[2].id)
+ self.assertEqual(set([2]), cfg.cputune.vcpupin[2].cpuset)
+ self.assertEqual(3, cfg.cputune.vcpupin[3].id)
+ self.assertEqual(set([2]), cfg.cputune.vcpupin[3].cpuset)
+ self.assertIsNotNone(cfg.cpu.numa)
+ for instance_cell, numa_cfg_cell in zip(
+ instance_topology.cells, cfg.cpu.numa.cells):
+ self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+ self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+ self.assertEqual(instance_cell.memory * units.Ki,
+ numa_cfg_cell.memory)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_clock(self, mock_flavor):
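+ # Every guest gets a UTC clock with pit and rtc timers; on x86 an
+ # hpet timer entry is also added and explicitly disabled.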
+ self.flags(virt_type='kvm', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {}
+ hpet_map = {
+ arch.X86_64: True,
+ arch.I686: True,
+ arch.PPC: False,
+ arch.PPC64: False,
+ arch.ARMV7: False,
+ arch.AARCH64: False,
+ }
+
+ for guestarch, expect_hpet in hpet_map.items():
+ with mock.patch.object(libvirt_driver.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta,
+ disk_info)
+ self.assertIsInstance(cfg.clock,
+ vconfig.LibvirtConfigGuestClock)
+ self.assertEqual(cfg.clock.offset, "utc")
+ self.assertIsInstance(cfg.clock.timers[0],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertIsInstance(cfg.clock.timers[1],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertEqual(cfg.clock.timers[0].name, "pit")
+ self.assertEqual(cfg.clock.timers[0].tickpolicy,
+ "delay")
+ self.assertEqual(cfg.clock.timers[1].name, "rtc")
+ self.assertEqual(cfg.clock.timers[1].tickpolicy,
+ "catchup")
+ if expect_hpet:
+ self.assertEqual(3, len(cfg.clock.timers))
+ self.assertIsInstance(cfg.clock.timers[2],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertEqual('hpet', cfg.clock.timers[2].name)
+ self.assertFalse(cfg.clock.timers[2].present)
+ else:
+ self.assertEqual(2, len(cfg.clock.timers))
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_windows(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref['os_type'] = 'windows'
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+
+ self.assertIsInstance(cfg.clock,
+ vconfig.LibvirtConfigGuestClock)
+ self.assertEqual(cfg.clock.offset, "localtime")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_two_nics(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 2),
+ {}, disk_info)
+ self.assertEqual(cfg.acpi, True)
+ self.assertEqual(cfg.memory, 2 * units.Mi)
+ self.assertEqual(cfg.vcpus, 1)
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_boot_dev, ["hd"])
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(len(cfg.devices), 10)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_bug_1118829(self, mock_flavor):
+ self.flags(virt_type='uml', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = {'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {u'vda': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': u'vda'},
+ 'root': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': 'vda'}}}
+
+ # NOTE(jdg): For this specific test leave this blank.
+ # This still exercises the failure code path
+ # and doesn't require fakes and stubs of the iSCSI discovery.
+ block_device_info = {}
+ conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, block_device_info)
+ self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_root_device_name(self, mock_flavor):
+ self.flags(virt_type='uml', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ block_device_info = {'root_device_name': '/dev/vdb'}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ block_device_info)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, block_device_info)
+ self.assertEqual(cfg.acpi, False)
+ self.assertEqual(cfg.memory, 2 * units.Mi)
+ self.assertEqual(cfg.vcpus, 1)
+ self.assertEqual(cfg.os_type, "uml")
+ self.assertEqual(cfg.os_boot_dev, [])
+ self.assertEqual(cfg.os_root, '/dev/vdb')
+ self.assertEqual(len(cfg.devices), 3)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_block_device(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdc'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdd'}),
+ ])}
+ info['block_device_mapping'][0]['connection_info'] = conn_info
+ info['block_device_mapping'][1]['connection_info'] = conn_info
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, info)
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, info)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'vdc')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'vdd')
+ self.assertTrue(info['block_device_mapping'][0].save.called)
+ self.assertTrue(info['block_device_mapping'][1].save.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc_with_attached_volume(self, mock_flavor):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'boot_index': 0}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ }),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ }),
+ ])}
+
+ info['block_device_mapping'][0]['connection_info'] = conn_info
+ info['block_device_mapping'][1]['connection_info'] = conn_info
+ info['block_device_mapping'][2]['connection_info'] = conn_info
+ info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
+ info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
+ info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, info)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, info)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[1].target_dev, 'vdc')
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'vdd')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_configdrive(self, mock_flavor):
+ # It's necessary to check whether the architecture is power, because
+ # power has no IDE support, so libvirt translates
+ # all IDE calls to SCSI
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ # make configdrive.required_by() return True
+ instance_ref['config_drive'] = True
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ # The last device is selected for this. On x86 it is the last IDE
+ # device (hdd). Since power only supports SCSI, the last device
+ # is sdz
+
+ expect = {"ppc": "sdz", "ppc64": "sdz"}
+ disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, disk)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_virtio_scsi_bus(self, mock_flavor):
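+ # An image with hw_scsi_model=virtio-scsi should get a virtio-scsi
+ # controller in the guest config.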
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, [], image_meta)
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestController)
+ self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_virtio_scsi_bus_bdm(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ bd_info = {
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
+ ])}
+ bd_info['block_device_mapping'][0]['connection_info'] = conn_info
+ bd_info['block_device_mapping'][1]['connection_info'] = conn_info
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, bd_info, image_meta)
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], image_meta,
+ disk_info, [], bd_info)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'sdc')
+ self.assertEqual(cfg.devices[2].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'sdd')
+ self.assertEqual(cfg.devices[3].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestController)
+ self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+ self.flags(enabled=False, group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 7)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc_and_tablet(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+ use_usb_tablet=True,
+ group='libvirt')
+ self.flags(enabled=False, group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_spice_and_tablet(self, mock_flavor):
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm',
+ use_usb_tablet=True,
+ group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "spice")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_spice_and_agent(self, mock_flavor):
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm',
+ use_usb_tablet=True,
+ group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
+ self.assertEqual(cfg.devices[5].type, "spice")
+ self.assertEqual(cfg.devices[6].type, "qxl")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch('nova.console.serial.acquire_port')
+ def test_get_guest_config_serial_console(self, acquire_port,
+ mock_flavor):
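+ # With serial consoles enabled, the guest serial devices are TCP
+ # consoles bound to the port returned by acquire_port.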
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ acquire_port.return_value = 11111
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(8, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual(11111, cfg.devices[2].listen_port)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_through_flavor(self, mock_flavor):
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': 3}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(10, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual("tcp", cfg.devices[3].type)
+ self.assertEqual("tcp", cfg.devices[4].type)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_invalid_flavor(self, mock_flavor):
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': "a"}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.assertRaises(
+ exception.ImageSerialPortNumberInvalid,
+ conn._get_guest_config, instance_ref, [], {}, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_image_and_flavor(self,
+ mock_flavor):
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_serial_port_count": "3"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': 4}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], image_meta,
+ disk_info)
+ self.assertEqual(10, len(cfg.devices), cfg.devices)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual("tcp", cfg.devices[3].type)
+ self.assertEqual("tcp", cfg.devices[4].type)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_invalid_img_meta(self,
+ mock_flavor):
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_serial_port_count": "fail"}}
+ self.assertRaises(
+ exception.ImageSerialPortNumberInvalid,
+ conn._get_guest_config, instance_ref, [], image_meta, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch('nova.console.serial.acquire_port')
+ def test_get_guest_config_serial_console_through_port_rng_exhausted(
+ self, acquire_port, mock_flavor):
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
+ '127.0.0.1')
+ self.assertRaises(
+ exception.SocketPortRangeExhaustedException,
+ conn._get_guest_config, instance_ref, [], {}, disk_info)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance(self, _lookup_by_name):
+ i = self._test_get_serial_ports_from_instance(_lookup_by_name)
+ self.assertEqual([
+ ('127.0.0.1', 100),
+ ('127.0.0.1', 101),
+ ('127.0.0.2', 100),
+ ('127.0.0.2', 101)], list(i))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance_bind_only(self, _lookup_by_name):
+ i = self._test_get_serial_ports_from_instance(
+ _lookup_by_name, mode='bind')
+ self.assertEqual([
+ ('127.0.0.1', 101),
+ ('127.0.0.2', 100)], list(i))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance_connect_only(self,
+ _lookup_by_name):
+ i = self._test_get_serial_ports_from_instance(
+ _lookup_by_name, mode='connect')
+ self.assertEqual([
+ ('127.0.0.1', 100),
+ ('127.0.0.2', 101)], list(i))
+
+ def _test_get_serial_ports_from_instance(self, _lookup_by_name, mode=None):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <serial type="tcp">
+ <source host="127.0.0.1" service="100" mode="connect"/>
+ </serial>
+ <serial type="tcp">
+ <source host="127.0.0.1" service="101" mode="bind"/>
+ </serial>
+ <serial type="tcp">
+ <source host="127.0.0.2" service="100" mode="bind"/>
+ </serial>
+ <serial type="tcp">
+ <source host="127.0.0.2" service="101" mode="connect"/>
+ </serial>
+ </devices>
+ </domain>"""
+
+ dom = mock.MagicMock()
+ dom.XMLDesc.return_value = xml
+ _lookup_by_name.return_value = dom
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ return conn._get_serial_ports_from_instance(
+ {'name': 'fake_instance'}, mode=mode)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+ use_usb_tablet=False,
+ group='libvirt')
+ self.flags(enabled=False,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 6)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, "vnc")
+ self.assertEqual(cfg.devices[4].type, "xen")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen_pae_hvm(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+ use_usb_tablet=False,
+ group='libvirt')
+ self.flags(enabled=False,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref['vm_mode'] = vm_mode.HVM
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
+ self.assertEqual(cfg.pae, True)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen_pae_pvm(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+ use_usb_tablet=False,
+ group='libvirt')
+ self.flags(enabled=False,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(cfg.os_type, vm_mode.XEN)
+ self.assertEqual(cfg.pae, True)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc_and_spice(self, mock_flavor):
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+ use_usb_tablet=True,
+ group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 10)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
+ self.assertEqual(cfg.devices[6].type, "vnc")
+ self.assertEqual(cfg.devices[7].type, "spice")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_invalid_watchdog_action(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_watchdog_action": "something"}}
+ self.assertRaises(exception.InvalidWatchdogAction,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta,
+ disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_watchdog_action_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_watchdog_action": "none"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("none", cfg.devices[7].action)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_with_watchdog_action_flavor(self, mock_flavor,
+ hw_watchdog_action="hw:watchdog_action"):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {hw_watchdog_action: 'none'}
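+ # the extra spec key is parameterized so both the scoped and the
+ # legacy unscoped watchdog property names are exercised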
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(9, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("none", cfg.devices[7].action)
+
+ def test_get_guest_config_with_watchdog_action_through_flavor(self):
+ self._test_get_guest_config_with_watchdog_action_flavor()
+
+ # TODO(pkholkin): the test accepting the old property name
+ # 'hw_watchdog_action' should be removed in the next release
+ def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
+ self):
+ self._test_get_guest_config_with_watchdog_action_flavor(
+ hw_watchdog_action="hw_watchdog_action")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_watchdog_overrides_flavor(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_watchdog_action': 'none'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_watchdog_action": "pause"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+
+ self.assertEqual(9, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("pause", cfg.devices[7].action)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_unsupported_video_driver_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "something"}}
+ self.assertRaises(exception.InvalidVideoMode,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta,
+ disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_video_driver_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "vmvga"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[5].type, "vnc")
+ self.assertEqual(cfg.devices[6].type, "vmvga")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_qga_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+ self.assertEqual(cfg.devices[7].type, "unix")
+ self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_video_driver_vram(self, mock_flavor):
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+ "hw_video_ram": "64"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[5].type, "spice")
+ self.assertEqual(cfg.devices[6].type, "qxl")
+ self.assertEqual(cfg.devices[6].vram, 64)
+
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_unmount_fs_if_error_during_lxc_create_domain(self,
+ mock_get_inst_path, mock_ensure_tree, mock_setup_container,
+ mock_get_info, mock_teardown):
+ """If we hit an error during a `_create_domain` call to `libvirt+lxc`
+ we need to ensure the guest FS is unmounted from the host so that any
+ future `lvremove` calls will work.
+ """
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.side_effect = exception.InstanceNotFound(
+ instance_id='foo')
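+ # make the domain definition fail so the LXC rootfs teardown path
+ # is exercised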
+ conn._conn.defineXML = mock.Mock()
+ conn._conn.defineXML.side_effect = ValueError('somethingbad')
+ with contextlib.nested(
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, 'cleanup')):
+ self.assertRaises(ValueError,
+ conn._create_domain_and_network,
+ self.context,
+ 'xml',
+ mock_instance, None)
+
+ mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
+
+ def test_video_driver_flavor_limit_not_set(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+ "hw_video_ram": "64"}}
+
+ with contextlib.nested(
+ mock.patch.object(objects.Flavor, 'get_by_id'),
+ mock.patch.object(objects.Instance, 'save'),
+ ) as (mock_flavor, mock_instance):
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.assertRaises(exception.RequestedVRamTooHigh,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta,
+ disk_info)
+
+ def test_video_driver_ram_above_flavor_limit(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_type = instance_ref.get_flavor()
+ instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+ "hw_video_ram": "64"}}
+ with contextlib.nested(
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=instance_type),
+ mock.patch.object(objects.Instance, 'save')):
+ self.assertRaises(exception.RequestedVRamTooHigh,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta,
+ disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_without_qga_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_device(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertIsNone(cfg.devices[6].backend)
+ self.assertIsNone(cfg.devices[6].rate_bytes)
+ self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_not_allowed(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 7)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_limits(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True',
+ 'hw_rng:rate_bytes': '1024',
+ 'hw_rng:rate_period': '2'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertIsNone(cfg.devices[6].backend)
+ self.assertEqual(cfg.devices[6].rate_bytes, 1024)
+ self.assertEqual(cfg.devices[6].rate_period, 2)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_backend(self, mock_flavor, mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = True
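+ # the configured rng_dev_path exists on the host, so it should be
+ # used as the RNG backend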
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
+ self.assertIsNone(cfg.devices[6].rate_bytes)
+ self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_dev_not_present(self, mock_flavor,
+ mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = False
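+ # the configured rng_dev_path does not exist, so building the guest
+ # config should fail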
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ self.assertRaises(exception.RngDeviceNotExist,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': '10000',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(10000, cfg.cputune.shares)
+ self.assertEqual(20000, cfg.cputune.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_bogus_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.assertRaises(ValueError,
+ conn._get_guest_config,
+ instance_ref, [], {}, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_sysinfo_serial(self, expected_serial,
+ mock_flavor):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = drvr._get_guest_config_sysinfo(instance_ref)
+
+ self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
+ self.assertEqual(version.vendor_string(),
+ cfg.system_manufacturer)
+ self.assertEqual(version.product_string(),
+ cfg.system_product)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.system_version)
+ self.assertEqual(expected_serial,
+ cfg.system_serial)
+ self.assertEqual(instance_ref['uuid'],
+ cfg.system_uuid)
+
+ def test_get_guest_config_sysinfo_serial_none(self):
+ self.flags(sysinfo_serial="none", group="libvirt")
+ self._test_get_guest_config_sysinfo_serial(None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid")
+ def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_os(self):
+ self.flags(sysinfo_serial="os", group="libvirt")
+
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_open, ):
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_hardware(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_get_host_uuid")
+ ) as (mock_exists, mock_uuid):
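+ # report /etc/machine-id as missing so "auto" falls back to the
+ # hardware UUID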
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return False
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_os(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_exists, mock_open):
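+ # report /etc/machine-id as present so "auto" reads the serial
+ # from the OS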
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return True
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_invalid(self):
+ self.flags(sysinfo_serial="invalid", group="libvirt")
+
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.LibvirtDriver,
+ fake.FakeVirtAPI(),
+ True)
+
+ def _create_fake_service_compute(self):
+ service_info = {
+ 'id': 1729,
+ 'host': 'fake',
+ 'report_count': 0
+ }
+ service_ref = objects.Service(**service_info)
+
+ compute_info = {
+ 'id': 1729,
+ 'vcpus': 2,
+ 'memory_mb': 1024,
+ 'local_gb': 2048,
+ 'vcpus_used': 0,
+ 'memory_mb_used': 0,
+ 'local_gb_used': 0,
+ 'free_ram_mb': 1024,
+ 'free_disk_gb': 2048,
+ 'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'running_vms': 0,
+ 'cpu_info': '',
+ 'current_workload': 0,
+ 'service_id': service_ref['id']
+ }
+ compute_ref = objects.ComputeNode(**compute_info)
+ return (service_ref, compute_ref)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_kvm(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.1',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
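+ # address 0000:00:00.1 should show up below as domain 0000, bus 00,
+ # slot 00, function 1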
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'yes')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "1")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.2',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'no')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "2")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_without_kernel_id(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_empty(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
+ # default, so an empty string or None value in the os_command_line
+ # image property must not override that default
+ image_meta = {"properties": {"os_command_line": ""}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertNotEqual(cfg.os_cmdline, "")
+
+ image_meta = {"properties": {"os_command_line": None}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNotNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_armv7(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.ARMV7
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "vexpress-a15")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_aarch64(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.AARCH64
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "virt")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_machine_type":
+ "fake_machine_type"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_from_config(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(hw_machine_type=['x86_64=fake_machine_type'],
+ group='libvirt')
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='xtpr'/>
+ </cpu>
+ """
+
+ # Make sure the host arch is mocked as x86_64
+ self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
+ baselineCPU=fake_baselineCPU,
+ getVersion=lambda: 1005001)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_ppc64(self, device_index, mock_flavor):
+ """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
+ """
+ self.flags(virt_type='kvm', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {}
+ expected = (arch.PPC64, arch.PPC)
+ for guestarch in expected:
+ with mock.patch.object(libvirt_driver.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta,
+ disk_info)
+ self.assertIsInstance(cfg.devices[device_index],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertEqual(cfg.devices[device_index].type, 'vga')
+
+ def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
+ self.flags(vnc_enabled=True)
+ self._test_get_guest_config_ppc64(6)
+
+ def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+ self._test_get_guest_config_ppc64(8)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_none(self, mock_flavor):
+ self.flags(cpu_mode="none", group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertIsNone(conf.cpu.mode)
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_kvm(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ def get_lib_version_stub():
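+ # libvirt versions are encoded as major * 1000000 + minor * 1000
+ # + micro, so this stub reports 0.9.11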
+ return (0 * 1000 * 1000) + (9 * 1000) + 11
+
+ self.stubs.Set(self.conn,
+ "getLibVersion",
+ get_lib_version_stub)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_uml(self, mock_flavor):
+ self.flags(virt_type="uml",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_lxc(self, mock_flavor):
+ self.flags(virt_type="lxc",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_passthrough(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-passthrough", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-passthrough")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_model(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-model", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_custom(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="custom",
+ cpu_model="Penryn",
+ group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "custom")
+ self.assertEqual(conf.cpu.model, "Penryn")
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_topology(self, mock_flavor):
+ fake_flavor = objects.flavor.Flavor.get_by_id(
+ self.context,
+ self.test_instance['instance_type_id'])
+ fake_flavor.vcpus = 8
+ fake_flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
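+ # 8 vCPUs capped at 4 sockets should produce a 4 socket, 2 core,
+ # 1 thread topology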
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
+ return_value=fake_flavor):
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertEqual(conf.cpu.sockets, 4)
+ self.assertEqual(conf.cpu.cores, 2)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_by_default(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_disable(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=0, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ no_exist = True
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ no_exist = False
+ break
+ self.assertTrue(no_exist)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_period_value(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=21, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(21, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_qemu(self, mock_flavor):
+ self.flags(virt_type='qemu', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('xen', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_lxc(self, mock_flavor):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ no_exist = True
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ no_exist = False
+ break
+ self.assertTrue(no_exist)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
+ instance_data = dict(self.test_instance)
+ instance_data.update({'vm_mode': vm_mode.HVM})
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, expect_xen_hvm=True)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
+ instance_data = dict(self.test_instance)
+ instance_data.update({'vm_mode': vm_mode.XEN})
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, expect_xen_hvm=False,
+ xen_only=True)
+
+ def test_xml_and_uri_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=False)
+
+ def test_xml_and_uri_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
+
+ def test_xml_and_uri(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=True)
+
+ def test_xml_and_uri_rescue(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=True,
+ expect_ramdisk=True, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=True, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=True,
+ expect_ramdisk=False, rescue=instance_data)
+
+ def test_xml_uuid(self):
+ self._check_xml_and_uuid({"disk_format": "raw"})
+
+ def test_lxc_container_and_uri(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_container(instance_data)
+
+ def test_xml_disk_prefix(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_prefix(instance_data, None)
+
+ def test_xml_user_specified_disk_prefix(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_prefix(instance_data, 'sd')
+
+ def test_xml_disk_driver(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_driver(instance_data)
+
+ def test_xml_disk_bus_virtio(self):
+ self._check_xml_and_disk_bus({"disk_format": "raw"},
+ None,
+ (("disk", "virtio", "vda"),))
+
+ def test_xml_disk_bus_ide(self):
+ # It's necessary to check if the architecture is power, because
+ # power does not support ide, so libvirt translates all ide
+ # calls to scsi
+
+ expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+ arch.PPC64: ("cdrom", "scsi", "sda")}
+
+ expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+ ("cdrom", "ide", "hda"))
+ self._check_xml_and_disk_bus({"disk_format": "iso"},
+ None,
+ (expec_val,))
+
+ def test_xml_disk_bus_ide_and_virtio(self):
+ # It's necessary to check if the architecture is power, because
+ # power does not support ide, so libvirt translates all ide
+ # calls to scsi
+
+ expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+ arch.PPC64: ("cdrom", "scsi", "sda")}
+
+ swap = {'device_name': '/dev/vdc',
+ 'swap_size': 1}
+ ephemerals = [{'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'size': 1}]
+ block_device_info = {
+ 'swap': swap,
+ 'ephemerals': ephemerals}
+ expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+ ("cdrom", "ide", "hda"))
+ self._check_xml_and_disk_bus({"disk_format": "iso"},
+ block_device_info,
+ (expec_val,
+ ("disk", "virtio", "vdb"),
+ ("disk", "virtio", "vdc")))
+
+ def test_list_instance_domains_fast(self):
+ if not hasattr(libvirt, "VIR_CONNECT_LIST_DOMAINS_ACTIVE"):
+ self.skipTest("libvirt missing VIR_CONNECT_LIST_DOMAINS_ACTIVE")
+
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ def fake_list_all(flags):
+ vms = []
+ if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
+ vms.extend([vm1, vm2])
+ if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
+ vms.extend([vm3, vm4])
+ return vms
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains_fast()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+
+ doms = drvr._list_instance_domains_fast(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+
+ def test_list_instance_domains_slow(self):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+ vms = [vm1, vm2, vm3, vm4]
+
+ def fake_lookup_id(id):
+ for vm in vms:
+ if vm.ID() == id:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_lookup_name(name):
+ for vm in vms:
+ if vm.name() == name:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_list_doms():
+ # Include one ID that no longer exists
+ return [vm1.ID(), vm2.ID(), 666]
+
+ def fake_list_ddoms():
+ # Include one name that no longer exists and one duplicate from
+ # the running list to show the race in the transition from
+ # inactive -> running
+ return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = fake_list_ddoms
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+ libvirt_driver.LibvirtDriver._conn.numOfDefinedDomains = lambda: 2
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains_slow()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+
+ doms = drvr._list_instance_domains_slow(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+
+ def test_list_instance_domains_fallback_no_support(self):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vms = [vm1, vm2]
+
+ def fake_lookup_id(id):
+ for vm in vms:
+ if vm.ID() == id:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_list_doms():
+ return [vm1.ID(), vm2.ID()]
+
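+ # listAllDomains() raising VIR_ERR_NO_SUPPORT forces the driver to
+ # fall back to the slow listing path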
+ def fake_list_all(flags):
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "API is not supported",
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+ raise ex
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+ libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].id, vm1.id)
+ self.assertEqual(doms[1].id, vm2.id)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains_fast")
+ def test_list_instance_domains_filtering(self, mock_list):
+ vm0 = FakeVirtDomain(id=0, name="Domain-0") # Xen dom-0
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
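+ # Domain-0 is not a nova guest, so it must be filtered out unless
+ # only_guests=False is passed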
+ mock_list.return_value = [vm0, vm1, vm2]
+ doms = drvr._list_instance_domains()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ mock_list.assert_called_with(True)
+
+ mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
+ doms = drvr._list_instance_domains(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+ mock_list.assert_called_with(False)
+
+ mock_list.return_value = [vm0, vm1, vm2]
+ doms = drvr._list_instance_domains(only_guests=False)
+ self.assertEqual(len(doms), 3)
+ self.assertEqual(doms[0].name(), vm0.name())
+ self.assertEqual(doms[1].name(), vm1.name())
+ self.assertEqual(doms[2].name(), vm2.name())
+ mock_list.assert_called_with(True)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_list_instances(self, mock_list):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ mock_list.return_value = [vm1, vm2, vm3, vm4]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ names = drvr.list_instances()
+ self.assertEqual(names[0], vm1.name())
+ self.assertEqual(names[1], vm2.name())
+ self.assertEqual(names[2], vm3.name())
+ self.assertEqual(names[3], vm4.name())
+ mock_list.assert_called_with(only_running=False)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_list_instance_uuids(self, mock_list):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ mock_list.return_value = [vm1, vm2, vm3, vm4]
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ uuids = drvr.list_instance_uuids()
+ self.assertEqual(len(uuids), 4)
+ self.assertEqual(uuids[0], vm1.UUIDString())
+ self.assertEqual(uuids[1], vm2.UUIDString())
+ self.assertEqual(uuids[2], vm3.UUIDString())
+ self.assertEqual(uuids[3], vm4.UUIDString())
+ mock_list.assert_called_with(only_running=False)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_get_all_block_devices(self, mock_list):
+ xml = [
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/3'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ ]
+
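+ # Only the <disk type='block'> sources should be collected;
+ # file-backed disks are ignored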
+ mock_list.return_value = [
+ FakeVirtDomain(xml[0], id=3, name="instance00000001"),
+ FakeVirtDomain(xml[1], id=1, name="instance00000002"),
+ FakeVirtDomain(xml[2], id=5, name="instance00000003")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ devices = drvr._get_all_block_devices()
+ self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
+ mock_list.assert_called_with()
+
+ def test_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./', group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for testing ami
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'ami')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for testing ami
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'ami')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./', group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+
+ def convert_image(source, dest, out_format):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+
+ self.stubs.Set(images, 'convert_image', convert_image)
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'raw')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lvm_snapshot_in_raw_format(self):
+ # Tests Lvm backend snapshot functionality with raw format
+ # snapshots.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='block' device='disk'>
+ <source dev='/dev/some-vg/some-lv'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ update_task_state_calls = [
+ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
+ mock.call(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)]
+ mock_update_task_state = mock.Mock()
+ mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
+ autospec=True)
+ volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
+ mock_volume_info = mock.Mock(return_value=volume_info,
+ autospec=True)
+ mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
+ mock_convert_image = mock.Mock()
+
+ def convert_image_side_effect(source, dest, out_format,
+ run_as_root=True):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+ mock_convert_image.side_effect = convert_image_side_effect
+
+ self.flags(snapshots_directory='./',
+ snapshot_image_format='raw',
+ images_type='lvm',
+ images_volume_group='nova-vg', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "lvm"
+
+ # Start test
+ image_service = nova.tests.unit.image.fake.FakeImageService()
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = image_service.create(context, sent_meta)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_conn',
+ autospec=True),
+ mock.patch.object(libvirt_driver.imagebackend.lvm,
+ 'volume_info',
+ mock_volume_info),
+ mock.patch.object(libvirt_driver.imagebackend.images,
+ 'convert_image',
+ mock_convert_image),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_lookup_by_name',
+ mock_lookupByName)):
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ mock_update_task_state)
+
+ mock_lookupByName.assert_called_once_with("instance-00000001")
+ mock_volume_info.assert_has_calls(mock_volume_info_calls)
+ mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
+ mock.ANY,
+ 'raw',
+ run_as_root=True)
+ snapshot = image_service.show(context, recv_meta['id'])
+ mock_update_task_state.assert_has_calls(update_task_state_calls)
+ self.assertEqual('available', snapshot['properties']['image_state'])
+ self.assertEqual('active', snapshot['status'])
+ self.assertEqual('raw', snapshot['disk_format'])
+ self.assertEqual(snapshot_name, snapshot['name'])
+ # Reset for the subsequent tests that do not set images_type
+ self.flags(images_type='default', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ def test_lxc_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+ libvirt_driver.libvirt_utils.disk_type = "raw"
+
+ def convert_image(source, dest, out_format):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+
+ self.stubs.Set(images, 'convert_image', convert_image)
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'raw')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshot_image_format='qcow2',
+ snapshots_directory='./',
+ group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'qcow2')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshot_image_format='qcow2',
+ snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'qcow2')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lvm_snapshot_in_qcow2_format(self):
+ # Tests Lvm backend snapshot functionality with qcow2 format
+ # snapshots.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='block' device='disk'>
+ <source dev='/dev/some-vg/some-lv'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ update_task_state_calls = [
+ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
+ mock.call(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)]
+ mock_update_task_state = mock.Mock()
+ mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
+ autospec=True)
+ volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
+ mock_volume_info = mock.Mock(return_value=volume_info, autospec=True)
+ mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
+ mock_convert_image = mock.Mock()
+
+ def convert_image_side_effect(source, dest, out_format,
+ run_as_root=True):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+ mock_convert_image.side_effect = convert_image_side_effect
+
+ self.flags(snapshots_directory='./',
+ snapshot_image_format='qcow2',
+ images_type='lvm',
+ images_volume_group='nova-vg', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "lvm"
+
+ # Start test
+ image_service = nova.tests.unit.image.fake.FakeImageService()
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = image_service.create(context, sent_meta)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_conn',
+ autospec=True),
+ mock.patch.object(libvirt_driver.imagebackend.lvm,
+ 'volume_info',
+ mock_volume_info),
+ mock.patch.object(libvirt_driver.imagebackend.images,
+ 'convert_image',
+ mock_convert_image),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_lookup_by_name',
+ mock_lookupByName)):
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ mock_update_task_state)
+
+ mock_lookupByName.assert_called_once_with("instance-00000001")
+ mock_volume_info.assert_has_calls(mock_volume_info_calls)
+ mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
+ mock.ANY,
+ 'qcow2',
+ run_as_root=True)
+ snapshot = image_service.show(context, recv_meta['id'])
+ mock_update_task_state.assert_has_calls(update_task_state_calls)
+ self.assertEqual('available', snapshot['properties']['image_state'])
+ self.assertEqual('active', snapshot['status'])
+ self.assertEqual('qcow2', snapshot['disk_format'])
+ self.assertEqual(snapshot_name, snapshot['name'])
+ self.flags(images_type='default', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ def test_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for
+ # testing different base image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_no_image_architecture(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for
+ # testing different base image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; it will be updated by the snapshot method.
+ # The snapshot must go through the same image_service instance.
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ group='libvirt')
+
+ # Assign a non-existent image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ # Assign a non-existent image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+
+ instance_ref = objects.Instance(**test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_metadata_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ group='libvirt')
+
+ # Assign an image with an architecture defined (x86_64)
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
+
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id),
+ 'architecture': 'fake_arch',
+ 'key_a': 'value_a',
+ 'key_b': 'value_b'}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['properties']['architecture'], 'fake_arch')
+ self.assertEqual(snapshot['properties']['key_a'], 'value_a')
+ self.assertEqual(snapshot['properties']['key_b'], 'value_b')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_with_os_type(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ group='libvirt')
+
+ # Assign a non-existent image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+ test_instance["os_type"] = 'linux'
+
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id),
+ 'os_type': instance_ref['os_type']}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['properties']['os_type'],
+ instance_ref['os_type'])
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test__create_snapshot_metadata(self):
+ base = {}
+ instance = {'kernel_id': 'kernel',
+ 'project_id': 'prj_id',
+ 'ramdisk_id': 'ram_id',
+ 'os_type': None}
+ img_fmt = 'raw'
+ snp_name = 'snapshot_name'
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ expected = {'is_public': False,
+ 'status': 'active',
+ 'name': snp_name,
+ 'properties': {
+ 'kernel_id': instance['kernel_id'],
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id'],
+ 'ramdisk_id': instance['ramdisk_id'],
+ },
+ 'disk_format': img_fmt,
+ 'container_format': base.get('container_format', 'bare')
+ }
+ self.assertEqual(ret, expected)
+
+ # Simulate an instance with the os_type field set, a base image
+ # whose disk_format is 'ami', and a container_format other than
+ # 'bare'
+ instance['os_type'] = 'linux'
+ base['disk_format'] = 'ami'
+ base['container_format'] = 'test_container'
+ expected['properties']['os_type'] = instance['os_type']
+ expected['disk_format'] = base['disk_format']
+ expected['container_format'] = base.get('container_format', 'bare')
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ self.assertEqual(ret, expected)
+
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
+ 'connect_volume')
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
+ def test_get_volume_config(self, get_config, connect_volume):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ connection_info = {'driver_volume_type': 'fake',
+ 'data': {'device_path': '/fake',
+ 'access_mode': 'rw'}}
+ bdm = {'device_name': 'vdb',
+ 'disk_bus': 'fake-bus',
+ 'device_type': 'fake-type'}
+ disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
+ 'dev': 'vdb'}
+ mock_config = mock.MagicMock()
+
+ get_config.return_value = mock_config
+ config = conn._get_volume_config(connection_info, disk_info)
+ get_config.assert_called_once_with(connection_info, disk_info)
+ self.assertEqual(mock_config, config)
+
+ def test_attach_invalid_volume_type(self):
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.VolumeDriverNotFound,
+ conn.attach_volume, None,
+ {"driver_volume_type": "badtype"},
+ instance,
+ "/dev/sda")
+
+ def test_attach_blockio_invalid_hypervisor(self):
+ self.flags(virt_type='fake_type', group='libvirt')
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.InvalidHypervisorType,
+ conn.attach_volume, None,
+ {"driver_volume_type": "fake",
+ "data": {"logical_block_size": "4096",
+ "physical_block_size": "4096"}
+ },
+ instance,
+ "/dev/sda")
+
+ def test_attach_blockio_invalid_version(self):
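+ # getLibVersion() encodes the version as major * 1000000 +
+ # minor * 1000 + micro, so this reports 0.9.8 which is too old
+ # for custom block I/O sizes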
+ def get_lib_version_stub():
+ return (0 * 1000 * 1000) + (9 * 1000) + 8
+ self.flags(virt_type='qemu', group='libvirt')
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ instance = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
+ self.assertRaises(exception.Invalid,
+ conn.attach_volume, None,
+ {"driver_volume_type": "fake",
+ "data": {"logical_block_size": "4096",
+ "physical_block_size": "4096"}
+ },
+ instance,
+ "/dev/sda")
+
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_attach_volume_with_vir_domain_affect_live_flag(self,
+ mock_lookup_by_name, mock_get_info):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ mock_dom = mock.MagicMock()
+ mock_lookup_by_name.return_value = mock_dom
+
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"device_path": "/fake",
+ "access_mode": "rw"}}
+ bdm = {'device_name': 'vdb',
+ 'disk_bus': 'fake-bus',
+ 'device_type': 'fake-type'}
+ disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
+ 'dev': 'vdb'}
+ mock_get_info.return_value = disk_info
+ mock_conf = mock.MagicMock()
+ flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
+
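+ # Attaching while the domain is running or paused must pass both
+ # the AFFECT_CONFIG and AFFECT_LIVE flags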
+ with contextlib.nested(
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_config',
+ return_value=mock_conf),
+ mock.patch.object(conn, '_set_cache_mode')
+ ) as (mock_connect_volume, mock_get_volume_config,
+ mock_set_cache_mode):
+ for state in (power_state.RUNNING, power_state.PAUSED):
+ mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
+
+ conn.attach_volume(self.context, connection_info, instance,
+ "/dev/vdb", disk_bus=bdm['disk_bus'],
+ device_type=bdm['device_type'])
+
+ mock_lookup_by_name.assert_called_with(instance['name'])
+ mock_get_info.assert_called_with(CONF.libvirt.virt_type, bdm)
+ mock_connect_volume.assert_called_with(
+ connection_info, disk_info)
+ mock_get_volume_config.assert_called_with(
+ connection_info, disk_info)
+ mock_set_cache_mode.assert_called_with(mock_conf)
+ mock_dom.attachDeviceFlags.assert_called_with(
+ mock_conf.to_xml(), flags)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_disk_xml')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_detach_volume_with_vir_domain_affect_live_flag(self,
+ mock_lookup_by_name, mock_get_disk_xml):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ mock_dom = mock.MagicMock()
+ mock_xml = \
+ """
+ <disk type='file'>
+ <source file='/path/to/fake-volume'/>
+ <target dev='vdc' bus='virtio'/>
+ </disk>
+ """
+ mock_get_disk_xml.return_value = mock_xml
+
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"device_path": "/fake",
+ "access_mode": "rw"}}
+ flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
+
+ with mock.patch.object(conn, '_disconnect_volume') as \
+ mock_disconnect_volume:
+ for state in (power_state.RUNNING, power_state.PAUSED):
+ mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
+ mock_lookup_by_name.return_value = mock_dom
+
+ conn.detach_volume(connection_info, instance, '/dev/vdc')
+
+ mock_lookup_by_name.assert_called_with(instance['name'])
+ mock_get_disk_xml.assert_called_with(mock_dom.XMLDesc(0),
+ 'vdc')
+ mock_dom.detachDeviceFlags.assert_called_with(mock_xml, flags)
+ mock_disconnect_volume.assert_called_with(
+ connection_info, 'vdc')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_multi_nic(self, mock_flavor):
+ network_info = _fake_network_info(self.stubs, 2)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+ interfaces = tree.findall("./devices/interface")
+ self.assertEqual(len(interfaces), 2)
+ self.assertEqual(interfaces[0].get('type'), 'bridge')
+
+ def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
+ exc=ValueError()):
+ open_behavior = os.open(os.path.join('.', '.directio.test'),
+ os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
+ if raise_open:
+ open_behavior.AndRaise(exc)
+ else:
+ open_behavior.AndReturn(3)
+ write_behavior = os.write(3, mox.IgnoreArg())
+ if raise_write:
+ write_behavior.AndRaise(exc)
+ else:
+ os.close(3)
+ os.unlink(3)
+
+ def test_supports_direct_io(self):
+ # O_DIRECT is not supported on all Python runtimes, so on platforms
+ # where it's not supported (e.g. Mac), we can still test the code-path
+ # by stubbing out the value.
+ if not hasattr(os, 'O_DIRECT'):
+ # `mock` seems to have trouble stubbing an attr that doesn't
+ # originally exist, so falling back to stubbing out the attribute
+ # directly.
+ os.O_DIRECT = 16384
+ self.addCleanup(delattr, os, 'O_DIRECT')
+
+ einval = OSError()
+ einval.errno = errno.EINVAL
+ self.mox.StubOutWithMock(os, 'open')
+ self.mox.StubOutWithMock(os, 'write')
+ self.mox.StubOutWithMock(os, 'close')
+ self.mox.StubOutWithMock(os, 'unlink')
+ _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io
+
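+ # Record five scenarios: success, a generic write failure, a
+ # generic open failure, then EINVAL on write and on open (EINVAL
+ # means O_DIRECT is unsupported rather than an error)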
+ self._behave_supports_direct_io()
+ self._behave_supports_direct_io(raise_write=True)
+ self._behave_supports_direct_io(raise_open=True)
+ self._behave_supports_direct_io(raise_write=True, exc=einval)
+ self._behave_supports_direct_io(raise_open=True, exc=einval)
+
+ self.mox.ReplayAll()
+ self.assertTrue(_supports_direct_io('.'))
+ self.assertRaises(ValueError, _supports_direct_io, '.')
+ self.assertRaises(ValueError, _supports_direct_io, '.')
+ self.assertFalse(_supports_direct_io('.'))
+ self.assertFalse(_supports_direct_io('.'))
+ self.mox.VerifyAll()
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_container(self, instance, mock_flavor):
+ instance_ref = objects.Instance(**instance)
+
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertEqual(conn.uri(), 'lxc:///')
+
+ network_info = _fake_network_info(self.stubs, 1)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+
+ checks = [
+ (lambda t: t.find('.').get('type'), 'lxc'),
+ (lambda t: t.find('./os/type').text, 'exe'),
+ (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
+
+ for i, (check, expected_result) in enumerate(checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
+ target = tree.find('./devices/filesystem/source').get('dir')
+ self.assertTrue(len(target) > 0)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_disk_prefix(self, instance, prefix, mock_flavor):
+ instance_ref = objects.Instance(**instance)
+
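+ # When disk_prefix is configured the first disk becomes
+ # "<prefix>a"; otherwise each virt_type uses its own default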
+ def _get_prefix(p, default):
+ if p:
+ return p + 'a'
+ return default
+
+ type_disk_map = {
+ 'qemu': [
+ (lambda t: t.find('.').get('type'), 'qemu'),
+ (lambda t: t.find('./devices/disk/target').get('dev'),
+ _get_prefix(prefix, 'vda'))],
+ 'xen': [
+ (lambda t: t.find('.').get('type'), 'xen'),
+ (lambda t: t.find('./devices/disk/target').get('dev'),
+ _get_prefix(prefix, 'sda'))],
+ 'kvm': [
+ (lambda t: t.find('.').get('type'), 'kvm'),
+ (lambda t: t.find('./devices/disk/target').get('dev'),
+ _get_prefix(prefix, 'vda'))],
+ 'uml': [
+ (lambda t: t.find('.').get('type'), 'uml'),
+ (lambda t: t.find('./devices/disk/target').get('dev'),
+ _get_prefix(prefix, 'ubda'))]
+ }
+
+ for (virt_type, checks) in type_disk_map.iteritems():
+ self.flags(virt_type=virt_type, group='libvirt')
+ if prefix:
+ self.flags(disk_prefix=prefix, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+
+ for i, (check, expected_result) in enumerate(checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s != %s failed check %d' %
+ (check(tree), expected_result, i))
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_disk_driver(self, image_meta, mock_flavor):
+ os_open = os.open
+ directio_supported = True
+
+ def os_open_stub(path, flags, *args, **kwargs):
+ if flags & os.O_DIRECT:
+ if not directio_supported:
+ raise OSError(errno.EINVAL,
+ '%s: %s' % (os.strerror(errno.EINVAL), path))
+ flags &= ~os.O_DIRECT
+ return os_open(path, flags, *args, **kwargs)
+
+ self.stubs.Set(os, 'open', os_open_stub)
+
+ @staticmethod
+ def connection_supports_direct_io_stub(dirpath):
+ return directio_supported
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_supports_direct_io', connection_supports_direct_io_stub)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
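+ # With direct I/O reported as supported every <driver> element
+ # should use cache mode "none"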
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta)
+ tree = etree.fromstring(xml)
+ disks = tree.findall('./devices/disk/driver')
+ for guest_disk in disks:
+ self.assertEqual(guest_disk.get("cache"), "none")
+
+ directio_supported = False
+
+ # The O_DIRECT availability is cached on first use in
+ # LibvirtDriver, hence we re-create it here
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta)
+ tree = etree.fromstring(xml)
+ disks = tree.findall('./devices/disk/driver')
+ for guest_disk in disks:
+ self.assertEqual(guest_disk.get("cache"), "writethrough")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_disk_bus(self, image_meta,
+ block_device_info, wantConfig,
+ mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ block_device_info,
+ image_meta)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta,
+ block_device_info=block_device_info)
+ tree = etree.fromstring(xml)
+
+ got_disks = tree.findall('./devices/disk')
+ got_disk_targets = tree.findall('./devices/disk/target')
+ for i in range(len(wantConfig)):
+ want_device_type = wantConfig[i][0]
+ want_device_bus = wantConfig[i][1]
+ want_device_dev = wantConfig[i][2]
+
+ got_device_type = got_disks[i].get('device')
+ got_device_bus = got_disk_targets[i].get('bus')
+ got_device_dev = got_disk_targets[i].get('dev')
+
+ self.assertEqual(got_device_type, want_device_type)
+ self.assertEqual(got_device_bus, want_device_bus)
+ self.assertEqual(got_device_dev, want_device_dev)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_uuid(self, image_meta, mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta)
+ tree = etree.fromstring(xml)
+ self.assertEqual(tree.find('./uuid').text,
+ instance_ref['uuid'])
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_uri(self, instance, mock_flavor,
+ expect_ramdisk=False, expect_kernel=False,
+ rescue=None, expect_xen_hvm=False, xen_only=False):
+ instance_ref = objects.Instance(**instance)
+
+ xen_vm_mode = vm_mode.XEN
+ if expect_xen_hvm:
+ xen_vm_mode = vm_mode.HVM
+
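+ # Map each virt_type to its expected connection URI and the
+ # per-hypervisor XML checks applied below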
+ type_uri_map = {'qemu': ('qemu:///system',
+ [(lambda t: t.find('.').get('type'), 'qemu'),
+ (lambda t: t.find('./os/type').text,
+ vm_mode.HVM),
+ (lambda t: t.find('./devices/emulator'), None)]),
+ 'kvm': ('qemu:///system',
+ [(lambda t: t.find('.').get('type'), 'kvm'),
+ (lambda t: t.find('./os/type').text,
+ vm_mode.HVM),
+ (lambda t: t.find('./devices/emulator'), None)]),
+ 'uml': ('uml:///system',
+ [(lambda t: t.find('.').get('type'), 'uml'),
+ (lambda t: t.find('./os/type').text,
+ vm_mode.UML)]),
+ 'xen': ('xen:///',
+ [(lambda t: t.find('.').get('type'), 'xen'),
+ (lambda t: t.find('./os/type').text,
+ xen_vm_mode)])}
+
+ if expect_xen_hvm or xen_only:
+ hypervisors_to_check = ['xen']
+ else:
+ hypervisors_to_check = ['qemu', 'kvm', 'xen']
+
+ for hypervisor_type in hypervisors_to_check:
+ check_list = type_uri_map[hypervisor_type][1]
+
+ if rescue:
+ suffix = '.rescue'
+ else:
+ suffix = ''
+ if expect_kernel:
+ check = (lambda t: self.relpath(t.find('./os/kernel').text).
+ split('/')[1], 'kernel' + suffix)
+ else:
+ check = (lambda t: t.find('./os/kernel'), None)
+ check_list.append(check)
+
+ if expect_kernel:
+ check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
+ text, hypervisor_type == "qemu")
+ check_list.append(check)
+ # Hypervisors that only support vm_mode.HVM, as well as Xen,
+ # should not produce configuration that results in kernel
+ # arguments
+ if not expect_kernel and (hypervisor_type in
+ ['qemu', 'kvm', 'xen']):
+ check = (lambda t: t.find('./os/root'), None)
+ check_list.append(check)
+ check = (lambda t: t.find('./os/cmdline'), None)
+ check_list.append(check)
+
+ if expect_ramdisk:
+ check = (lambda t: self.relpath(t.find('./os/initrd').text).
+ split('/')[1], 'ramdisk' + suffix)
+ else:
+ check = (lambda t: t.find('./os/initrd'), None)
+ check_list.append(check)
+
+ if hypervisor_type in ['qemu', 'kvm']:
+ xpath = "./sysinfo/system/entry"
+ check = (lambda t: t.findall(xpath)[0].get("name"),
+ "manufacturer")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[0].text,
+ version.vendor_string())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[1].get("name"),
+ "product")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[1].text,
+ version.product_string())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[2].get("name"),
+ "version")
+ check_list.append(check)
+ # NOTE(sirp): empty strings don't roundtrip in lxml (they are
+ # converted to None), so we need an `or ''` to correct for that
+ check = (lambda t: t.findall(xpath)[2].text or '',
+ version.version_string_with_package())
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[3].get("name"),
+ "serial")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[3].text,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+ check_list.append(check)
+
+ check = (lambda t: t.findall(xpath)[4].get("name"),
+ "uuid")
+ check_list.append(check)
+ check = (lambda t: t.findall(xpath)[4].text,
+ instance['uuid'])
+ check_list.append(check)
+
+ if hypervisor_type in ['qemu', 'kvm']:
+ check = (lambda t: t.findall('./devices/serial')[0].get(
+ 'type'), 'file')
+ check_list.append(check)
+ check = (lambda t: t.findall('./devices/serial')[1].get(
+ 'type'), 'pty')
+ check_list.append(check)
+ check = (lambda t: self.relpath(t.findall(
+ './devices/serial/source')[0].get('path')).
+ split('/')[1], 'console.log')
+ check_list.append(check)
+ else:
+ check = (lambda t: t.find('./devices/console').get(
+ 'type'), 'pty')
+ check_list.append(check)
+
+ common_checks = [
+ (lambda t: t.find('.').tag, 'domain'),
+ (lambda t: t.find('./memory').text, '2097152')]
+ if rescue:
+ common_checks += [
+ (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
+ get('file')).split('/')[1], 'disk.rescue'),
+ (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
+ get('file')).split('/')[1], 'disk')]
+ else:
+ common_checks += [(lambda t: self.relpath(t.findall(
+ './devices/disk/source')[0].get('file')).split('/')[1],
+ 'disk')]
+ common_checks += [(lambda t: self.relpath(t.findall(
+ './devices/disk/source')[1].get('file')).split('/')[1],
+ 'disk.local')]
+
+ for virt_type in hypervisors_to_check:
+ expected_uri = type_uri_map[virt_type][0]
+ checks = type_uri_map[virt_type][1]
+ self.flags(virt_type=virt_type, group='libvirt')
+
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
+ del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertEqual(conn.uri(), expected_uri)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ rescue=rescue)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info,
+ rescue=rescue)
+ tree = etree.fromstring(xml)
+ for i, (check, expected_result) in enumerate(checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s != %s failed check %d' %
+ (check(tree), expected_result, i))
+
+ for i, (check, expected_result) in enumerate(common_checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s != %s failed common check %d' %
+ (check(tree), expected_result, i))
+
+ filterref = './devices/interface/filterref'
+ vif = network_info[0]
+ nic_id = vif['address'].replace(':', '')
+ fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
+ instance_filter_name = fw._instance_filter_name(instance_ref,
+ nic_id)
+ self.assertEqual(tree.find(filterref).get('filter'),
+ instance_filter_name)
+
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
+ #
+ # Deliberately not just assigning this string to CONF.connection_uri
+ # and checking against that later on. This way we make sure the
+ # implementation doesn't fiddle around with the CONF.
+ testuri = 'something completely different'
+ self.flags(connection_uri=testuri, group='libvirt')
+ for (virt_type, (expected_uri, checks)) in type_uri_map.iteritems():
+ self.flags(virt_type=virt_type, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertEqual(conn.uri(), testuri)
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ # ensure_filtering_rules_for_instance() finishes with a timeout.
+ # Preparing mocks
+ def fake_none(self, *args):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
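+ # FakeTime accumulates the simulated sleep time so the retry
+ # loop's total wait can be asserted below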
+ class FakeTime(object):
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
+
+ fake_timer = FakeTime()
+
+ def fake_sleep(t):
+ fake_timer.sleep(t)
+
+ # _fake_network_info must be called before create_fake_libvirt_mock(),
+ # as _fake_network_info calls importutils.import_class() and
+ # create_fake_libvirt_mock() mocks importutils.import_class().
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock()
+ instance_ref = objects.Instance(**self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'instance_filter_exists',
+ fake_none)
+ self.stubs.Set(greenthread,
+ 'sleep',
+ fake_sleep)
+ conn.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
+ except exception.NovaException as e:
+ msg = ('The firewall filter for %s does not exist' %
+ instance_ref['name'])
+            self.assertIn(msg, six.text_type(e))
+
+ self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
+ "amount of time")
+
+ def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
+ instance_ref = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ compute_info = {'disk_available_least': 400,
+ 'cpu_info': 'asdf',
+ }
+ filename = "file"
+
+ self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
+ self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+ # _check_cpu_match
+ conn._compare_cpu("asdf")
+
+ # mounted_on_same_shared_storage
+ conn._create_shared_storage_test_file().AndReturn(filename)
+
+ self.mox.ReplayAll()
+ return_value = conn.check_can_live_migrate_destination(self.context,
+ instance_ref, compute_info, compute_info, True)
+ self.assertThat({"filename": "file",
+ 'image_type': 'default',
+ 'disk_available_mb': 409600,
+ "disk_over_commit": False,
+ "block_migration": True},
+ matchers.DictMatches(return_value))
+
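+    # Note (sketch, not part of the original patch): the expected
+    # 'disk_available_mb' above is the destination's disk_available_least
+    # (reported in GiB) expressed in MiB, assuming a plain 1024 factor:
+    def _sketch_disk_available_mb(self):
+        self.assertEqual(409600, 400 * units.Ki)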
+ def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
+ instance_ref = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ compute_info = {'cpu_info': 'asdf'}
+ filename = "file"
+
+ self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
+ self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+ # _check_cpu_match
+ conn._compare_cpu("asdf")
+
+ # mounted_on_same_shared_storage
+ conn._create_shared_storage_test_file().AndReturn(filename)
+
+ self.mox.ReplayAll()
+ return_value = conn.check_can_live_migrate_destination(self.context,
+ instance_ref, compute_info, compute_info, False)
+ self.assertThat({"filename": "file",
+ "image_type": 'default',
+ "block_migration": False,
+ "disk_over_commit": False,
+ "disk_available_mb": None},
+ matchers.DictMatches(return_value))
+
+ def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
+ instance_ref = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ compute_info = {'cpu_info': 'asdf'}
+
+ self.mox.StubOutWithMock(conn, '_compare_cpu')
+
+ conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
+ reason='foo')
+ )
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidCPUInfo,
+ conn.check_can_live_migrate_destination,
+ self.context, instance_ref,
+ compute_info, compute_info, False)
+
+ def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
+ objects.Instance(**self.test_instance)
+ dest_check_data = {"filename": "file",
+ "block_migration": True,
+ "disk_over_commit": False,
+ "disk_available_mb": 1024}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
+ conn._cleanup_shared_storage_test_file("file")
+
+ self.mox.ReplayAll()
+ conn.check_can_live_migrate_destination_cleanup(self.context,
+ dest_check_data)
+
+ def _mock_can_live_migrate_source(self, block_migration=False,
+ is_shared_block_storage=False,
+ is_shared_instance_path=False,
+ disk_available_mb=1024):
+ instance = objects.Instance(**self.test_instance)
+ dest_check_data = {'filename': 'file',
+ 'image_type': 'default',
+ 'block_migration': block_migration,
+ 'disk_over_commit': False,
+ 'disk_available_mb': disk_available_mb}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.mox.StubOutWithMock(conn, '_is_shared_block_storage')
+ conn._is_shared_block_storage(instance, dest_check_data).AndReturn(
+ is_shared_block_storage)
+ self.mox.StubOutWithMock(conn, '_check_shared_storage_test_file')
+ conn._check_shared_storage_test_file('file').AndReturn(
+ is_shared_instance_path)
+
+ return (instance, dest_check_data, conn)
+
+ def test_check_can_live_migrate_source_block_migration(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ block_migration=True)
+
+ self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
+ conn._assert_dest_node_has_enough_disk(
+ self.context, instance, dest_check_data['disk_available_mb'],
+ False, None)
+
+ self.mox.ReplayAll()
+ ret = conn.check_can_live_migrate_source(self.context, instance,
+ dest_check_data)
+ self.assertIsInstance(ret, dict)
+ self.assertIn('is_shared_block_storage', ret)
+ self.assertIn('is_shared_instance_path', ret)
+
+ def test_check_can_live_migrate_source_shared_block_storage(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ is_shared_block_storage=True)
+ self.mox.ReplayAll()
+ conn.check_can_live_migrate_source(self.context, instance,
+ dest_check_data)
+
+ def test_check_can_live_migrate_source_shared_instance_path(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ is_shared_instance_path=True)
+ self.mox.ReplayAll()
+ conn.check_can_live_migrate_source(self.context, instance,
+ dest_check_data)
+
+ def test_check_can_live_migrate_source_non_shared_fails(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source()
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidSharedStorage,
+ conn.check_can_live_migrate_source, self.context,
+ instance, dest_check_data)
+
+ def test_check_can_live_migrate_source_shared_block_migration_fails(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ block_migration=True,
+ is_shared_block_storage=True)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidLocalStorage,
+ conn.check_can_live_migrate_source,
+ self.context, instance, dest_check_data)
+
+ def test_check_can_live_migrate_shared_path_block_migration_fails(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ block_migration=True,
+ is_shared_instance_path=True)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidLocalStorage,
+ conn.check_can_live_migrate_source,
+ self.context, instance, dest_check_data)
+
+ def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source()
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidSharedStorage,
+ conn.check_can_live_migrate_source,
+ self.context, instance, dest_check_data)
+
+ def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
+ instance, dest_check_data, conn = self._mock_can_live_migrate_source(
+ block_migration=True,
+ disk_available_mb=0)
+
+ self.mox.StubOutWithMock(conn, "get_instance_disk_info")
+ conn.get_instance_disk_info(instance["name"],
+ block_device_info=None).AndReturn(
+ '[{"virt_disk_size":2}]')
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.MigrationError,
+ conn.check_can_live_migrate_source,
+ self.context, instance, dest_check_data)
+
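+    # Illustrative sketch (not part of the original patch): the string
+    # returned by the stubbed get_instance_disk_info above is a JSON list of
+    # per-disk dicts, which the source-side check parses before sizing disks:
+    def _sketch_disk_info_json(self):
+        disks = jsonutils.loads('[{"virt_disk_size":2}]')
+        self.assertEqual(2, disks[0]['virt_disk_size'])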
+ def test_is_shared_block_storage_rbd(self):
+ CONF.set_override('images_type', 'rbd', 'libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertTrue(conn._is_shared_block_storage(
+ 'instance', {'image_type': 'rbd'}))
+
+ def test_is_shared_block_storage_non_remote(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertFalse(conn._is_shared_block_storage(
+ 'instance', {'is_shared_instance_path': False}))
+
+ def test_is_shared_block_storage_rbd_only_source(self):
+ CONF.set_override('images_type', 'rbd', 'libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertFalse(conn._is_shared_block_storage(
+ 'instance', {'is_shared_instance_path': False}))
+
+ def test_is_shared_block_storage_rbd_only_dest(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertFalse(conn._is_shared_block_storage(
+ 'instance', {'image_type': 'rbd',
+ 'is_shared_instance_path': False}))
+
+ def test_is_shared_block_storage_volume_backed(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
+ mock_get.return_value = '[]'
+ self.assertTrue(conn._is_shared_block_storage(
+ {'name': 'name'}, {'is_volume_backed': True,
+ 'is_shared_instance_path': False}))
+
+ def test_is_shared_block_storage_volume_backed_with_disk(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
+ mock_get.return_value = '[{"virt_disk_size":2}]'
+ self.assertFalse(conn._is_shared_block_storage(
+ {'name': 'instance_name'},
+ {'is_volume_backed': True, 'is_shared_instance_path': False}))
+ mock_get.assert_called_once_with('instance_name')
+
+ def test_is_shared_block_storage_nfs(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_backend = mock.MagicMock()
+ mock_image_backend.backend.return_value = mock_backend
+ mock_backend.is_file_in_instance_path.return_value = True
+ self.assertTrue(conn._is_shared_block_storage(
+ 'instance', {'is_shared_instance_path': True}))
+
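+    # Condensed summary (sketch, not part of the original patch) of what the
+    # _is_shared_block_storage tests above assert for each scenario:
+    #   rbd on both source and destination        -> shared (True)
+    #   local, non-remote image backend           -> not shared (False)
+    #   rbd only on source or only on destination -> not shared (False)
+    #   volume-backed with no local disks         -> shared (True)
+    #   volume-backed but with a local disk       -> not shared (False)
+    #   file backend on a shared instance path    -> shared (True)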
+ @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
+ def test_live_migration_changes_listen_addresses(self):
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+
+ xml_tmpl = ("<domain type='kvm'>"
+ "<devices>"
+ "<graphics type='vnc' listen='{vnc}'>"
+ "<listen address='{vnc}'/>"
+ "</graphics>"
+ "<graphics type='spice' listen='{spice}'>"
+ "<listen address='{spice}'/>"
+ "</graphics>"
+ "</devices>"
+ "</domain>")
+
+ initial_xml = xml_tmpl.format(vnc='1.2.3.4',
+ spice='5.6.7.8')
+
+ target_xml = xml_tmpl.format(vnc='10.0.0.1',
+ spice='10.0.0.2')
+ target_xml = etree.tostring(etree.fromstring(target_xml))
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI2")
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
+ vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+ initial_xml)
+ vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+ None,
+ target_xml,
+ mox.IgnoreArg(),
+ None,
+ _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+ self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+ self.compute._rollback_live_migration(self.context, instance_ref,
+ 'dest', False)
+
+ # start test
+ migrate_data = {'pre_live_migration_result':
+ {'graphics_listen_addrs':
+ {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', False,
+ self.compute._rollback_live_migration,
+ migrate_data=migrate_data)
+
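+    # Illustrative sketch (an assumption, not the driver's actual helper):
+    # the behaviour exercised above boils down to rewriting the <graphics>
+    # listen addresses in the migratable XML before calling migrateToURI2.
+    # A hypothetical equivalent using the etree import already in this module:
+    def _sketch_rewrite_listen_addrs(self, xml, listen_addrs):
+        doc = etree.fromstring(xml)
+        for gfx in doc.findall('./devices/graphics'):
+            addr = listen_addrs.get(gfx.get('type'))
+            if addr is None:
+                continue
+            gfx.set('listen', addr)
+            for listen in gfx.findall('listen'):
+                listen.set('address', addr)
+        return etree.tostring(doc)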
+ @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
+ def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
+ vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None,
+ _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+ self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+ self.compute._rollback_live_migration(self.context, instance_ref,
+ 'dest', False)
+
+ # start test
+ migrate_data = {'pre_live_migration_result':
+ {'graphics_listen_addrs':
+ {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', False,
+ self.compute._rollback_live_migration,
+ migrate_data=migrate_data)
+
+ def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
+ vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None,
+ _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+ self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+ self.compute._rollback_live_migration(self.context, instance_ref,
+ 'dest', False)
+
+ # start test
+ migrate_data = {}
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', False,
+ self.compute._rollback_live_migration,
+ migrate_data=migrate_data)
+
+ @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
+ def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
+ self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+ self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+ self.compute._rollback_live_migration(self.context, instance_ref,
+ 'dest', False)
+
+ # start test
+ migrate_data = {'pre_live_migration_result':
+ {'graphics_listen_addrs':
+ {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', False,
+ self.compute._rollback_live_migration,
+ migrate_data=migrate_data)
+
+ def test_live_migration_raises_exception(self):
+ # Confirms recover method is called when exceptions are raised.
+ # Preparing data
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI2")
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
+ if getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
+ vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None,
+ _bandwidth).AndRaise(
+ libvirt.libvirtError('ERR'))
+ else:
+ vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+ FakeVirtDomain().XMLDesc(0))
+ vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+ None,
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ None,
+ _bandwidth).AndRaise(
+ libvirt.libvirtError('ERR'))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+ self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
+ self.compute._rollback_live_migration(self.context, instance_ref,
+ 'dest', False)
+
+ # start test
+ migrate_data = {'pre_live_migration_result':
+ {'graphics_listen_addrs':
+ {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', False,
+ self.compute._rollback_live_migration,
+ migrate_data=migrate_data)
+
+ self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
+ self.assertEqual(power_state.RUNNING, instance_ref.power_state)
+
+ @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
+ def test_live_migration_raises_unsupported_config_exception(self):
+ # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
+ # migrateToURI is used instead.
+
+ # Preparing data
+ instance_ref = fake_instance.fake_instance_obj(
+ self.context, **self.test_instance)
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
+ self.mox.StubOutWithMock(vdmock, 'migrateToURI')
+ _bandwidth = CONF.libvirt.live_migration_bandwidth
+ vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
+ FakeVirtDomain().XMLDesc(0))
+ unsupported_config_error = libvirt.libvirtError('ERR')
+ unsupported_config_error.err = (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
+ # This is the first error we hit but since the error code is
+ # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
+ vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
+ mox.IgnoreArg(), mox.IgnoreArg(), None,
+ _bandwidth).AndRaise(unsupported_config_error)
+        # This is the second and final error that will actually kill the run;
+        # we use TestingException to make sure it's not the same libvirtError
+        # as above.
+ vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ mox.IgnoreArg(), None,
+ _bandwidth).AndRaise(test.TestingException('oops'))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ def fake_recover_method(context, instance, dest, block_migration):
+ pass
+
+ graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
+ migrate_data = {'pre_live_migration_result':
+ {'graphics_listen_addrs': graphics_listen_addrs}}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.mox.StubOutWithMock(
+ conn, '_check_graphics_addresses_can_live_migrate')
+ conn._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
+ self.mox.ReplayAll()
+
+ # start test
+ self.assertRaises(test.TestingException, conn._live_migration,
+ self.context, instance_ref, 'dest', post_method=None,
+ recover_method=fake_recover_method,
+ migrate_data=migrate_data)
+
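+    # Illustrative sketch (an assumption, not the driver's exact code): the
+    # fallback the test above exercises catches the libvirtError, inspects
+    # its error code and retries with the older migrateToURI API:
+    def _sketch_migrate_uri_fallback(self, dom, uri, flags, bandwidth):
+        try:
+            dom.migrateToURI2(uri, None, None, flags, None, bandwidth)
+        except libvirt.libvirtError as ex:
+            if ex.get_error_code() != libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
+                raise
+            dom.migrateToURI(uri, flags, None, bandwidth)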
+ def test_rollback_live_migration_at_destination(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with mock.patch.object(conn, "destroy") as mock_destroy:
+ conn.rollback_live_migration_at_destination("context",
+ "instance", [], None, True, None)
+ mock_destroy.assert_called_once_with("context",
+ "instance", [], None, True, None)
+
+ def _do_test_create_images_and_backing(self, disk_type):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
+ self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
+
+ disk_info = {'path': 'foo', 'type': disk_type,
+ 'disk_size': 1 * 1024 ** 3,
+ 'virt_disk_size': 20 * 1024 ** 3,
+ 'backing_file': None}
+ disk_info_json = jsonutils.dumps([disk_info])
+
+ libvirt_driver.libvirt_utils.create_image(
+ disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
+ conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
+ self.mox.ReplayAll()
+
+ self.stubs.Set(os.path, 'exists', lambda *args: False)
+ conn._create_images_and_backing(self.context, self.test_instance,
+ "/fake/instance/dir", disk_info_json)
+
+ def test_create_images_and_backing_qcow2(self):
+ self._do_test_create_images_and_backing('qcow2')
+
+ def test_create_images_and_backing_raw(self):
+ self._do_test_create_images_and_backing('raw')
+
+ def test_create_images_and_backing_ephemeral_gets_created(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ disk_info_json = jsonutils.dumps(
+ [{u'backing_file': u'fake_image_backing_file',
+ u'disk_size': 10747904,
+ u'path': u'disk_path',
+ u'type': u'qcow2',
+ u'virt_disk_size': 25165824},
+ {u'backing_file': u'ephemeral_1_default',
+ u'disk_size': 393216,
+ u'over_committed_disk_size': 1073348608,
+ u'path': u'disk_eph_path',
+ u'type': u'qcow2',
+ u'virt_disk_size': 1073741824}])
+
+ base_dir = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name)
+ self.test_instance.update({'name': 'fake_instance',
+ 'user_id': 'fake-user',
+ 'os_type': None,
+ 'project_id': 'fake-project'})
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_fetch_instance_kernel_ramdisk'),
+ mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
+ mock.patch.object(conn, '_create_ephemeral')
+ ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
+ create_ephemeral_mock):
+ conn._create_images_and_backing(self.context, self.test_instance,
+ "/fake/instance/dir",
+ disk_info_json)
+ self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
+ m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
+ self.assertEqual(
+ os.path.join(base_dir, 'ephemeral_1_default'),
+ m_kwargs['target'])
+ self.assertEqual(len(fetch_image_mock.call_args_list), 1)
+ m_args, m_kwargs = fetch_image_mock.call_args_list[0]
+ self.assertEqual(
+ os.path.join(base_dir, 'fake_image_backing_file'),
+ m_kwargs['target'])
+
+ def test_create_images_and_backing_disk_info_none(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
+
+ conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
+ self.mox.ReplayAll()
+
+ conn._create_images_and_backing(self.context, self.test_instance,
+ "/fake/instance/dir", None)
+
+ def test_pre_live_migration_works_correctly_mocked(self):
+ # Creating testdata
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ class FakeNetworkInfo():
+ def fixed_ips(self):
+ return ["test_ip_addr"]
+
+ def fake_none(*args, **kwargs):
+ return
+
+ self.stubs.Set(conn, '_create_images_and_backing', fake_none)
+
+ inst_ref = {'id': 'foo'}
+ c = context.get_admin_context()
+ nw_info = FakeNetworkInfo()
+
+ # Creating mocks
+ self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
+ driver.block_device_info_get_mapping(vol
+ ).AndReturn(vol['block_device_mapping'])
+ self.mox.StubOutWithMock(conn, "_connect_volume")
+ for v in vol['block_device_mapping']:
+ disk_info = {
+ 'bus': "scsi",
+ 'dev': v['mount_device'].rpartition("/")[2],
+ 'type': "disk"
+ }
+ conn._connect_volume(v['connection_info'],
+ disk_info)
+ self.mox.StubOutWithMock(conn, 'plug_vifs')
+ conn.plug_vifs(mox.IsA(inst_ref), nw_info)
+
+ self.mox.ReplayAll()
+ result = conn.pre_live_migration(c, inst_ref, vol, nw_info, None)
+
+ target_res = {'graphics_listen_addrs': {'spice': '127.0.0.1',
+ 'vnc': '127.0.0.1'}}
+ self.assertEqual(result, target_res)
+
+ def test_pre_live_migration_block_with_config_drive_mocked(self):
+ # Creating testdata
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ def fake_true(*args, **kwargs):
+ return True
+
+ self.stubs.Set(configdrive, 'required_by', fake_true)
+
+ inst_ref = {'id': 'foo'}
+ c = context.get_admin_context()
+
+ self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
+ conn.pre_live_migration, c, inst_ref, vol, None,
+ None, {'is_shared_instance_path': False,
+ 'is_shared_block_storage': False})
+
+ def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
+ # Creating testdata, using temp dir.
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ self.stubs.Set(conn, '_create_images_and_backing', fake_none)
+
+ class FakeNetworkInfo():
+ def fixed_ips(self):
+ return ["test_ip_addr"]
+ inst_ref = objects.Instance(**self.test_instance)
+ c = context.get_admin_context()
+ nw_info = FakeNetworkInfo()
+ # Creating mocks
+ self.mox.StubOutWithMock(conn, "_connect_volume")
+ for v in vol['block_device_mapping']:
+ disk_info = {
+ 'bus': "scsi",
+ 'dev': v['mount_device'].rpartition("/")[2],
+ 'type': "disk"
+ }
+ conn._connect_volume(v['connection_info'],
+ disk_info)
+ self.mox.StubOutWithMock(conn, 'plug_vifs')
+ conn.plug_vifs(mox.IsA(inst_ref), nw_info)
+ self.mox.ReplayAll()
+ migrate_data = {'is_shared_instance_path': False,
+ 'is_volume_backed': True,
+ 'block_migration': False,
+ 'instance_relative_path': inst_ref['name']
+ }
+ ret = conn.pre_live_migration(c, inst_ref, vol, nw_info, None,
+ migrate_data)
+ target_ret = {'graphics_listen_addrs': {'spice': '127.0.0.1',
+ 'vnc': '127.0.0.1'}}
+ self.assertEqual(ret, target_ret)
+ self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
+ inst_ref['name'])))
+
+ def test_pre_live_migration_plug_vifs_retry_fails(self):
+ self.flags(live_migration_retry_count=3)
+ instance = {'name': 'test', 'uuid': 'uuid'}
+
+ def fake_plug_vifs(instance, network_info):
+ raise processutils.ProcessExecutionError()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+ self.assertRaises(processutils.ProcessExecutionError,
+ conn.pre_live_migration,
+ self.context, instance, block_device_info=None,
+ network_info=[], disk_info={})
+
+ def test_pre_live_migration_plug_vifs_retry_works(self):
+ self.flags(live_migration_retry_count=3)
+ called = {'count': 0}
+ instance = {'name': 'test', 'uuid': 'uuid'}
+
+ def fake_plug_vifs(instance, network_info):
+ called['count'] += 1
+ if called['count'] < CONF.live_migration_retry_count:
+ raise processutils.ProcessExecutionError()
+ else:
+ return
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+ conn.pre_live_migration(self.context, instance, block_device_info=None,
+ network_info=[], disk_info={})
+
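+    # Illustrative sketch (an assumption, not the driver's exact code): the
+    # retry behaviour the two tests above exercise is essentially
+    #
+    #     for attempt in range(1, CONF.live_migration_retry_count + 1):
+    #         try:
+    #             self.plug_vifs(instance, network_info)
+    #             break
+    #         except processutils.ProcessExecutionError:
+    #             if attempt == CONF.live_migration_retry_count:
+    #                 raise
+    #             greenthread.sleep(1)  # sleep interval assumed, not asserted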
+ def test_pre_live_migration_image_not_created_with_shared_storage(self):
+ migrate_data_set = [{'is_shared_block_storage': False,
+ 'block_migration': False},
+ {'is_shared_block_storage': True,
+ 'block_migration': False},
+ {'is_shared_block_storage': False,
+ 'block_migration': True}]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing'),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ for migrate_data in migrate_data_set:
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ self.assertFalse(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_pre_live_migration_with_not_shared_instance_path(self):
+ migrate_data = {'is_shared_block_storage': False,
+ 'is_shared_instance_path': False}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+
+ def check_instance_dir(context, instance,
+ instance_dir, disk_info):
+ self.assertTrue(instance_dir)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing',
+ side_effect=check_instance_dir),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ self.assertTrue(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_get_instance_disk_info_works_correctly(self):
+ # Test data
+ instance_ref = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'])
+ info = jsonutils.loads(info)
+ self.assertEqual(info[0]['type'], 'raw')
+ self.assertEqual(info[0]['path'], '/test/disk')
+ self.assertEqual(info[0]['disk_size'], 10737418240)
+ self.assertEqual(info[0]['backing_file'], "")
+ self.assertEqual(info[0]['over_committed_disk_size'], 0)
+ self.assertEqual(info[1]['type'], 'qcow2')
+ self.assertEqual(info[1]['path'], '/test/disk.local')
+ self.assertEqual(info[1]['virt_disk_size'], 21474836480)
+ self.assertEqual(info[1]['backing_file'], "file")
+ self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
+
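+    # Note (sketch, not part of the original patch): the expected
+    # over_committed_disk_size for the qcow2 disk is simply its virtual size
+    # minus the bytes actually allocated on disk, per the values stubbed above:
+    def _sketch_over_commit_arithmetic(self):
+        self.assertEqual(18146236825, 21474836480 - 3328599655)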
+ def test_post_live_migration(self):
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ inst_ref = {'id': 'foo'}
+ cntx = context.get_admin_context()
+
+ # Set up the mock expectations
+ with contextlib.nested(
+ mock.patch.object(driver, 'block_device_info_get_mapping',
+ return_value=vol['block_device_mapping']),
+ mock.patch.object(conn, '_disconnect_volume')
+ ) as (block_device_info_get_mapping, _disconnect_volume):
+ conn.post_live_migration(cntx, inst_ref, vol)
+
+ block_device_info_get_mapping.assert_has_calls([
+ mock.call(vol)])
+ _disconnect_volume.assert_has_calls([
+ mock.call(v['connection_info'],
+ v['mount_device'].rpartition("/")[2])
+ for v in vol['block_device_mapping']])
+
+ def test_get_instance_disk_info_excludes_volumes(self):
+ # Test data
+ instance_ref = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume1'/>"
+ "<target dev='vdc' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume2'/>"
+ "<target dev='vdd' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': [
+ {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
+ {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'],
+ block_device_info=info)
+ info = jsonutils.loads(info)
+ self.assertEqual(info[0]['type'], 'raw')
+ self.assertEqual(info[0]['path'], '/test/disk')
+ self.assertEqual(info[0]['disk_size'], 10737418240)
+ self.assertEqual(info[0]['backing_file'], "")
+ self.assertEqual(info[0]['over_committed_disk_size'], 0)
+ self.assertEqual(info[1]['type'], 'qcow2')
+ self.assertEqual(info[1]['path'], '/test/disk.local')
+ self.assertEqual(info[1]['virt_disk_size'], 21474836480)
+ self.assertEqual(info[1]['backing_file'], "file")
+ self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_spawn_with_network_info(self, mock_flavor):
+ # Preparing mocks
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_getLibVersion():
+ return 9011
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='xtpr'/>
+ </cpu>
+ """
+
+ # _fake_network_info must be called before create_fake_libvirt_mock(),
+ # as _fake_network_info calls importutils.import_class() and
+ # create_fake_libvirt_mock() mocks importutils.import_class().
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
+ getCapabilities=fake_getCapabilities,
+ getVersion=lambda: 1005001,
+ baselineCPU=fake_baselineCPU)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance = objects.Instance(**instance_ref)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+
+ mock_flavor.return_value = flavor
+
+ # Mock out the get_info method of the LibvirtDriver so that the polling
+ # in the spawn method of the LibvirtDriver returns immediately
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
+ libvirt_driver.LibvirtDriver.get_info(instance
+ ).AndReturn({'state': power_state.RUNNING})
+
+ # Start test
+ self.mox.ReplayAll()
+
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
+ del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
+ self.stubs.Set(imagebackend.Image,
+ 'cache',
+ fake_none)
+
+ conn.spawn(self.context, instance, None, [], 'herp',
+ network_info=network_info)
+
+ path = os.path.join(CONF.instances_path, instance['name'])
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+
+ path = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name)
+ if os.path.isdir(path):
+ shutil.rmtree(os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name))
+
+ def test_spawn_without_image_meta(self):
+ self.create_image_called = False
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_create_image(*args, **kwargs):
+ self.create_image_called = True
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_image', fake_create_image)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ conn.spawn(self.context, instance, None, [], None)
+ self.assertTrue(self.create_image_called)
+
+ conn.spawn(self.context,
+ instance,
+ {'id': instance['image_ref']},
+ [],
+ None)
+ self.assertTrue(self.create_image_called)
+
+ def test_spawn_from_volume_calls_cache(self):
+ self.cache_called_for_disk = False
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_cache(*args, **kwargs):
+ if kwargs.get('image_id') == 'my_fake_image':
+ self.cache_called_for_disk = True
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+
+ self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'block_device_mapping': [
+ {'mount_device': 'vda',
+ 'boot_index': 0}
+ ]
+ }
+
+ # Volume-backed instance created without image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = ''
+ instance_ref['root_device_name'] = '/dev/vda'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+
+ conn.spawn(self.context, instance, None, [], None,
+ block_device_info=block_device_info)
+ self.assertFalse(self.cache_called_for_disk)
+
+ # Booted from volume but with placeholder image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['root_device_name'] = '/dev/vda'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+
+ conn.spawn(self.context, instance, None, [], None,
+ block_device_info=block_device_info)
+ self.assertFalse(self.cache_called_for_disk)
+
+ # Booted from an image
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+ conn.spawn(self.context, instance, None, [], None)
+ self.assertTrue(self.cache_called_for_disk)
+
+ def test_start_lxc_from_volume(self):
+ self.flags(virt_type="lxc",
+ group='libvirt')
+
+ def check_setup_container(path, container_dir=None, use_cow=False):
+ self.assertEqual(path, '/dev/path/to/dev')
+ self.assertTrue(use_cow)
+ return '/dev/nbd1'
+
+ bdm = {
+ 'guest_format': None,
+ 'boot_index': 0,
+ 'mount_device': '/dev/sda',
+ 'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'serial': 'afc1',
+ 'data': {
+ 'access_mode': 'rw',
+ 'device_path': '/dev/path/to/dev',
+ 'target_discovered': False,
+ 'encrypted': False,
+ 'qos_specs': None,
+ 'target_iqn': 'iqn: volume-afc1',
+ 'target_portal': 'ip: 3260',
+ 'volume_id': 'afc1',
+ 'target_lun': 1,
+ 'auth_password': 'uj',
+ 'auth_username': '47',
+ 'auth_method': 'CHAP'
+ }
+ },
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'delete_on_termination': False
+ }
+
+ def _get(key, opt=None):
+ return bdm.get(key, opt)
+
+ def getitem(key):
+ return bdm[key]
+
+ def setitem(key, val):
+ bdm[key] = val
+
+ bdm_mock = mock.MagicMock()
+ bdm_mock.__getitem__.side_effect = getitem
+ bdm_mock.__setitem__.side_effect = setitem
+ bdm_mock.get = _get
+
+ disk_mock = mock.MagicMock()
+ disk_mock.source_path = '/dev/path/to/dev'
+
+ block_device_info = {'block_device_mapping': [bdm_mock],
+ 'root_device_name': '/dev/sda'}
+
+ # Volume-backed instance created without image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = ''
+ instance_ref['root_device_name'] = '/dev/sda'
+ instance_ref['ephemeral_gb'] = 0
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
+ inst_obj = objects.Instance(**instance_ref)
+
+ flavor = inst_obj.get_flavor()
+ flavor.extra_specs = {}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_config',
+ return_value=disk_mock),
+ mock.patch.object(conn, 'get_info',
+ return_value={'state': power_state.RUNNING}),
+ mock.patch('nova.virt.disk.api.setup_container',
+ side_effect=check_setup_container),
+ mock.patch('nova.virt.disk.api.teardown_container'),
+ mock.patch.object(objects.Instance, 'save'),
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=flavor)):
+
+ conn.spawn(self.context, inst_obj, None, [], None,
+ network_info=[],
+ block_device_info=block_device_info)
+ self.assertEqual('/dev/nbd1',
+ inst_obj.system_metadata.get(
+ 'rootfs_device_name'))
+
+ def test_spawn_with_pci_devices(self):
+ def fake_none(*args, **kwargs):
+ return None
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ class FakeLibvirtPciDevice():
+ def dettach(self):
+ return None
+
+ def reset(self):
+ return None
+
+ def fake_node_device_lookup_by_name(address):
+ pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
+ % dict(hex='[\da-f]', oct='[0-8]'))
+ pattern = re.compile(pattern)
+ if pattern.match(address) is None:
+ raise libvirt.libvirtError()
+ return FakeLibvirtPciDevice()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_image', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ conn._conn.nodeDeviceLookupByName = \
+ fake_node_device_lookup_by_name
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance = objects.Instance(**instance_ref)
+ instance = dict(instance.iteritems())
+ instance['pci_devices'] = [{'address': '0000:00:00.0'}]
+
+ conn.spawn(self.context, instance, None, [], None)
+
+ def test_chown_disk_config_for_instance(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = copy.deepcopy(self.test_instance)
+ instance['name'] = 'test_name'
+ self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
+ fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
+ os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
+ fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())
+
+ self.mox.ReplayAll()
+ conn._chown_disk_config_for_instance(instance)
+
+ def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name, is_block_dev=False):
+ self.path = os.path.join(instance['name'], name)
+ self.is_block_dev = is_block_dev
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+        # Stop 'libvirt_driver._create_image' from touching the filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+ instance['os_type'] = os_type
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ if mkfs:
+ self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
+ {os_type: 'mkfs.ext3 --label %(fs_label)s %(target)s'})
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ conn._create_image(context, instance, disk_info['mapping'])
+ conn._get_guest_xml(self.context, instance, None,
+ disk_info, image_meta)
+
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * units.Gi},
+ {'filename': filename,
+ 'size': 20 * units.Gi},
+ ]
+ self.assertEqual(gotFiles, wantFiles)
+
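+    # Note (sketch, not part of the original patch): the hex filename expected
+    # above appears to be the SHA-1 of the image ref ('1') used as the image
+    # cache key:
+    def _sketch_image_cache_filename(self):
+        import hashlib
+        self.assertEqual('356a192b7913b04c54574d18c28d46e6395428ab',
+                         hashlib.sha1(b'1').hexdigest())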
+ def test_create_image_plain_os_type_blank(self):
+ self._test_create_image_plain(os_type='',
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_none(self):
+ self._test_create_image_plain(os_type=None,
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_set_no_fs(self):
+ self._test_create_image_plain(os_type='test',
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_set_with_fs(self):
+ self._test_create_image_plain(os_type='test',
+ filename='ephemeral_20_test',
+ mkfs=True)
+
+ def test_create_image_with_swap(self):
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name, is_block_dev=False):
+ self.path = os.path.join(instance['name'], name)
+ self.is_block_dev = is_block_dev
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+        # Stop 'libvirt_driver._create_image' from touching the filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ # Turn on some swap to exercise that codepath in _create_image
+ instance_ref['system_metadata']['instance_type_swap'] = 500
+ instance = objects.Instance(**instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ conn._create_image(context, instance, disk_info['mapping'])
+ conn._get_guest_xml(self.context, instance, None,
+ disk_info, image_meta)
+
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * units.Gi},
+ {'filename': 'ephemeral_20_default',
+ 'size': 20 * units.Gi},
+ {'filename': 'swap_500',
+ 'size': 500 * units.Mi},
+ ]
+ self.assertEqual(gotFiles, wantFiles)
+
+ @mock.patch.object(utils, 'execute')
+ def test_create_ephemeral_specified_fs(self, mock_exec):
+ self.flags(default_ephemeral_format='ext3')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True, max_size=20,
+ specified_fs='ext4')
+ mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
+ 'myVol', '/dev/something',
+ run_as_root=True)
+
+ def test_create_ephemeral_specified_fs_not_valid(self):
+ CONF.set_override('default_ephemeral_format', 'ext4')
+ ephemerals = [{'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'guest_format': 'dummy',
+ 'size': 1}]
+ block_device_info = {
+ 'ephemerals': ephemerals}
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ disk_info['mapping'].pop('disk.local')
+
+ with contextlib.nested(
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(conn, 'get_info'),
+ mock.patch.object(conn, '_create_domain_and_network')):
+ self.assertRaises(exception.InvalidBDMFormat, conn._create_image,
+ context, instance, disk_info['mapping'],
+ block_device_info=block_device_info)
+
+ def test_create_ephemeral_default(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext3', '-F', '-L', 'myVol',
+ '/dev/something', run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True, max_size=20)
+
+ def test_create_ephemeral_with_conf(self):
+ CONF.set_override('default_ephemeral_format', 'ext4')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
+ '/dev/something', run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True)
+
+ def test_create_ephemeral_with_arbitrary(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
+ {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
+ run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True)
+
+ def test_create_swap_default(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkswap', '/dev/something', run_as_root=False)
+ self.mox.ReplayAll()
+
+ conn._create_swap('/dev/something', 1, max_size=20)
+
+ def test_get_console_output_file(self):
+ fake_libvirt_utils.files['console.log'] = '01234567890'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456
+ instance = objects.Instance(**instance_ref)
+
+ console_dir = (os.path.join(tmpdir, instance['name']))
+ console_log = '%s/console.log' % (console_dir)
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <console type='file'>
+ <source path='%s'/>
+ <target port='0'/>
+ </console>
+ </devices>
+ </domain>
+ """ % console_log
+
+ def fake_lookup(id):
+ return FakeVirtDomain(fake_dom_xml)
+
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(self.context, instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ self.assertEqual('67890', output)
+
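+    # Note (sketch, not part of the original patch): with MAX_CONSOLE_BYTES
+    # forced to 5, only the tail of the 11-byte fake log is expected back:
+    def _sketch_console_truncation(self):
+        self.assertEqual('67890', '01234567890'[-5:])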
+ def test_get_console_output_pty(self):
+ fake_libvirt_utils.files['pty'] = '01234567890'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456
+ instance = objects.Instance(**instance_ref)
+
+ console_dir = (os.path.join(tmpdir, instance['name']))
+ pty_file = '%s/fake_pty' % (console_dir)
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <console type='pty'>
+ <source path='%s'/>
+ <target port='0'/>
+ </console>
+ </devices>
+ </domain>
+ """ % pty_file
+
+ def fake_lookup(id):
+ return FakeVirtDomain(fake_dom_xml)
+
+ def _fake_flush(self, fake_pty):
+ return 'foo'
+
+ def _fake_append_to_file(self, data, fpath):
+ return 'pty'
+
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+ libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
+ libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(self.context, instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ self.assertEqual('67890', output)
+
+ def test_get_host_ip_addr(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ ip = conn.get_host_ip_addr()
+ self.assertEqual(ip, CONF.my_ip)
+
+ def test_broken_connection(self):
+ for (error, domain) in (
+ (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
+ (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
+ (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.mox.StubOutWithMock(conn, "_wrapped_conn")
+ self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
+ self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
+ self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
+
+ conn._wrapped_conn.getLibVersion().AndRaise(
+ libvirt.libvirtError("fake failure"))
+
+ libvirt.libvirtError.get_error_code().AndReturn(error)
+ libvirt.libvirtError.get_error_domain().AndReturn(domain)
+
+ self.mox.ReplayAll()
+
+ self.assertFalse(conn._test_connection(conn._wrapped_conn))
+
+ self.mox.UnsetStubs()
+
+ def test_command_with_broken_connection(self):
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(libvirt, 'openAuth',
+ side_effect=libvirt.libvirtError("fake")),
+ mock.patch.object(libvirt.libvirtError, "get_error_code"),
+ mock.patch.object(libvirt.libvirtError, "get_error_domain"),
+ mock.patch.object(conn, '_set_host_enabled')):
+ self.assertRaises(exception.HypervisorUnavailable,
+ conn.get_num_instances)
+
+ def test_broken_connection_disable_service(self):
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._init_events_pipe()
+ with contextlib.nested(
+ mock.patch.object(conn, '_set_host_enabled')):
+ conn._close_callback(conn._wrapped_conn, 'ERROR!', '')
+ conn._dispatch_events()
+ conn._set_host_enabled.assert_called_once_with(
+ False,
+ disable_reason=u'Connection to libvirt lost: ERROR!')
+
+ def test_service_resume_after_broken_connection(self):
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = True
+ with contextlib.nested(
+ mock.patch.object(libvirt, 'openAuth',
+ return_value=mock.MagicMock()),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ conn.get_num_instances()
+ self.assertFalse(service_mock.disabled)
+ self.assertEqual('None', service_mock.disabled_reason)
+
+ def test_broken_connection_no_wrapped_conn(self):
+ # Tests that calling _close_callback when _wrapped_conn is None
+ # is a no-op, i.e. set_host_enabled won't be called.
+ self.mox.UnsetStubs()
+ # conn._wrapped_conn will be None since we never call libvirt.openAuth
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ # create our mock connection that libvirt will send to the callback
+ mock_failed_conn = mock.MagicMock()
+ mock_failed_conn.__getitem__.return_value = True
+ # nothing should happen when calling _close_callback since
+ # _wrapped_conn is None in the driver
+ conn._init_events_pipe()
+ conn._close_callback(mock_failed_conn, reason=None, opaque=None)
+ conn._dispatch_events()
+
+ def test_immediate_delete(self):
+ def fake_lookup_by_name(instance_name):
+ raise exception.InstanceNotFound(instance_id=instance_name)
+
+ def fake_delete_instance_files(instance):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_delete_instance_files',
+ fake_delete_instance_files)
+
+ instance = objects.Instance(**self.test_instance)
+ conn.destroy(self.context, instance, {})
+
+ def _test_destroy_removes_disk(self, volume_fail=False):
+ instance = {"name": "instancename", "id": "42",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
+ "cleaned": 0, 'info_cache': None, 'security_groups': []}
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(instance)
+ self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
+ driver.block_device_info_get_mapping(vol
+ ).AndReturn(vol['block_device_mapping'])
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ "_disconnect_volume")
+ if volume_fail:
+ libvirt_driver.LibvirtDriver._disconnect_volume(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
+ AndRaise(exception.VolumeNotFound('vol'))
+ else:
+ libvirt_driver.LibvirtDriver._disconnect_volume(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ 'delete_instance_files')
+ (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()).
+ AndReturn(True))
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info, ignore_errors=False):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ def fake_obj_load_attr(self, attrname):
+ if not hasattr(self, attrname):
+ self[attrname] = {}
+
+ def fake_save(self, context):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stubs.Set(objects.Instance, 'fields',
+ {'id': int, 'uuid': str, 'cleaned': int})
+ self.stubs.Set(objects.Instance, 'obj_load_attr',
+ fake_obj_load_attr)
+ self.stubs.Set(objects.Instance, 'save', fake_save)
+
+ conn.destroy(self.context, instance, [], vol)
+
+ def test_destroy_removes_disk(self):
+ self._test_destroy_removes_disk(volume_fail=False)
+
+ def test_destroy_removes_disk_volume_fails(self):
+ self._test_destroy_removes_disk(volume_fail=True)
+
+ def test_destroy_not_removes_disk(self):
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_undefine_domain')
+ libvirt_driver.LibvirtDriver._undefine_domain(instance)
+
+ # Start test
+ self.mox.ReplayAll()
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_unplug_vifs(instance, network_info, ignore_errors=False):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(conn, '_destroy', fake_destroy)
+ self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
+ self.stubs.Set(conn.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ conn.destroy(self.context, instance, [], None, False)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
+ def test_destroy_lxc_calls_teardown_container(self, mock_look_up,
+ mock_teardown_container,
+ mock_cleanup):
+ self.flags(virt_type='lxc', group='libvirt')
+ fake_domain = FakeVirtDomain()
+
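+ # destroy() flips the fake domain's power state to SHUTDOWN so the
+ # driver's wait loop sees the guest as powered off.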
+ def destroy_side_effect(*args, **kwargs):
+ fake_domain._info[0] = power_state.SHUTDOWN
+
+ with mock.patch.object(fake_domain, 'destroy',
+ side_effect=destroy_side_effect) as mock_domain_destroy:
+ mock_look_up.return_value = fake_domain
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = []
+ conn.destroy(self.context, instance, network_info, None, False)
+
+ mock_look_up.assert_has_calls([mock.call(instance.name),
+ mock.call(instance.name)])
+ mock_domain_destroy.assert_called_once_with()
+ mock_teardown_container.assert_called_once_with(instance)
+ mock_cleanup.assert_called_once_with(self.context, instance,
+ network_info, None, False,
+ None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
+ def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
+ mock_look_up, mock_teardown_container, mock_cleanup):
+ self.flags(virt_type='lxc', group='libvirt')
+ instance = fake_instance.fake_instance_obj(self.context)
+ inf_exception = exception.InstanceNotFound(instance_id=instance.name)
+ mock_look_up.side_effect = inf_exception
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = []
+ conn.destroy(self.context, instance, network_info, None, False)
+
+ mock_look_up.assert_has_calls([mock.call(instance.name),
+ mock.call(instance.name)])
+ mock_teardown_container.assert_called_once_with(instance)
+ mock_cleanup.assert_called_once_with(self.context, instance,
+ network_info, None, False,
+ None)
+
+ def test_reboot_different_ids(self):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+
+ self.flags(wait_soft_reboot_seconds=1, group='libvirt')
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ self.reboot_create_called = False
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+ mock_domain.shutdown()
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
+ mock_domain.ID().AndReturn('some_other_fake_id')
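+ # The domain ID changes after shutdown, so the soft reboot is treated
+ # as successful and the domain is re-created.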
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_create_domain(**kwargs):
+ self.reboot_create_called = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_create_domain', fake_create_domain)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+ self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
+ conn.reboot(None, instance, [], 'SOFT')
+ self.assertTrue(self.reboot_create_called)
+
+ def test_reboot_same_ids(self):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+
+ self.flags(wait_soft_reboot_seconds=1, group='libvirt')
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ self.reboot_hard_reboot_called = False
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+ mock_domain.shutdown()
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
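+ # The domain ID is unchanged after shutdown, so the soft reboot times
+ # out and a hard reboot is performed instead.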
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_hard_reboot(*args, **kwargs):
+ self.reboot_hard_reboot_called = True
+
+ def fake_sleep(interval):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(greenthread, 'sleep', fake_sleep)
+ self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+ self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
+ conn.reboot(None, instance, [], 'SOFT')
+ self.assertTrue(self.reboot_hard_reboot_called)
+
+ def test_soft_reboot_libvirt_exception(self):
+ # Tests that a hard reboot is performed when a soft reboot results
+ # in raising a libvirtError.
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+
+ # setup mocks
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.ID().AndReturn('some_fake_id')
+ mock_domain.shutdown().AndRaise(libvirt.libvirtError('Err'))
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ context = None
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ network_info = []
+
+ self.mox.StubOutWithMock(conn, '_lookup_by_name')
+ conn._lookup_by_name(instance['name']).AndReturn(mock_domain)
+ self.mox.StubOutWithMock(conn, '_hard_reboot')
+ conn._hard_reboot(context, instance, network_info, None)
+
+ self.mox.ReplayAll()
+
+ conn.reboot(context, instance, network_info, 'SOFT')
+
+ def _test_resume_state_on_host_boot_with_state(self, state):
+ called = {'count': 0}
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.info().AndReturn([state, None, None, None, None])
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_hard_reboot(*args):
+ called['count'] += 1
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
+ instance_details = {"name": "instancename", "id": 1,
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ instance = fake_instance.fake_instance_obj(
+ self.context, **instance_details)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ conn.resume_state_on_host_boot(self.context, instance, network_info,
+ block_device_info=None)
+
+ ignored_states = (power_state.RUNNING,
+ power_state.SUSPENDED,
+ power_state.NOSTATE,
+ power_state.PAUSED)
+ if state in ignored_states:
+ self.assertEqual(called['count'], 0)
+ else:
+ self.assertEqual(called['count'], 1)
+
+ def test_resume_state_on_host_boot_with_running_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
+
+ def test_resume_state_on_host_boot_with_suspended_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
+
+ def test_resume_state_on_host_boot_with_paused_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
+
+ def test_resume_state_on_host_boot_with_nostate(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
+
+ def test_resume_state_on_host_boot_with_shutdown_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
+
+ def test_resume_state_on_host_boot_with_crashed_state(self):
+ self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
+
+ def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
+ called = {'count': 0}
+ instance_details = {'name': 'test'}
+ instance = fake_instance.fake_instance_obj(
+ self.context, **instance_details)
+
+ def fake_lookup_by_name(instance_name):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ def fake_hard_reboot(*args):
+ called['count'] += 1
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
+ conn.resume_state_on_host_boot(self.context, instance, network_info=[],
+ block_device_info=None)
+
+ self.assertEqual(called['count'], 1)
+
+ def test_hard_reboot(self):
+ called = {'count': 0}
+ instance = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+ block_device_info = None
+
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(conn, '_destroy')
+ self.mox.StubOutWithMock(conn, '_get_instance_disk_info')
+ self.mox.StubOutWithMock(conn, '_get_guest_xml')
+ self.mox.StubOutWithMock(conn, '_create_images_and_backing')
+ self.mox.StubOutWithMock(conn, '_create_domain_and_network')
+
+ def fake_get_info(instance_name):
+ called['count'] += 1
+ if called['count'] == 1:
+ state = power_state.SHUTDOWN
+ else:
+ state = power_state.RUNNING
+ return dict(state=state)
+
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ conn._destroy(instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance, block_device_info)
+
+ system_meta = utils.instance_sys_meta(instance)
+ image_meta = utils.get_image_from_system_metadata(system_meta)
+
+ conn._get_guest_xml(self.context, instance, network_info, disk_info,
+ image_meta=image_meta,
+ block_device_info=block_device_info,
+ write_to_disk=True).AndReturn(dummyxml)
+ disk_info_json = '[{"virt_disk_size": 2}]'
+ conn._get_instance_disk_info(instance["name"], dummyxml,
+ block_device_info).AndReturn(disk_info_json)
+ conn._create_images_and_backing(self.context, instance,
+ libvirt_utils.get_instance_path(instance),
+ disk_info_json)
+ conn._create_domain_and_network(self.context, dummyxml, instance,
+ network_info, block_device_info,
+ reboot=True, vifs_already_plugged=True)
+ self.mox.ReplayAll()
+
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
+ @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
+ @mock.patch('nova.pci.manager.get_instance_pci_devs')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
+ @mock.patch('nova.virt.libvirt.utils.write_to_file')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
+ def test_hard_reboot_does_not_call_glance_show(self,
+ mock_destroy, mock_get_disk_info, mock_get_guest_config,
+ mock_get_instance_path, mock_write_to_file,
+ mock_get_instance_disk_info, mock_create_images_and_backing,
+ mock_create_domain_and_network, mock_prepare_pci_devices_for_use,
+ mock_get_instance_pci_devs, mock_looping_call):
+ """For a hard reboot, we shouldn't need an additional call to glance
+ to get the image metadata.
+
+ This is important for automatically spinning up instances on a
+ host reboot, since we won't have a user request context that would
+ allow the Glance request to go through. We have to rely on the cached
+ image metadata instead.
+
+ https://bugs.launchpad.net/nova/+bug/1339386
+ """
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ instance = objects.Instance(**self.test_instance)
+
+ network_info = mock.MagicMock()
+ block_device_info = mock.MagicMock()
+ mock_get_disk_info.return_value = {}
+ mock_get_guest_config.return_value = mock.MagicMock()
+ mock_get_instance_path.return_value = '/foo'
+ mock_looping_call.return_value = mock.MagicMock()
+ conn._image_api = mock.MagicMock()
+
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
+ self.assertFalse(conn._image_api.get.called)
+
+ def test_power_on(self):
+
+ def _check_xml_bus(name, xml, block_info):
+ tree = etree.fromstring(xml)
+ got_disk_targets = tree.findall('./devices/disk/target')
+ system_meta = utils.instance_sys_meta(instance)
+ image_meta = utils.get_image_from_system_metadata(system_meta)
+ want_device_bus = image_meta.get('hw_disk_bus')
+ if not want_device_bus:
+ want_device_bus = self.fake_img['properties']['hw_disk_bus']
+ got_device_bus = got_disk_targets[0].get('bus')
+ self.assertEqual(got_device_bus, want_device_bus)
+
+ def fake_get_info(instance_name):
+ called['count'] += 1
+ if called['count'] == 1:
+ state = power_state.SHUTDOWN
+ else:
+ state = power_state.RUNNING
+ return dict(state=state)
+
+ def _get_inst(with_meta=True):
+ inst_ref = self.test_instance
+ inst_ref['uuid'] = uuidutils.generate_uuid()
+ if with_meta:
+ inst_ref['system_metadata']['image_hw_disk_bus'] = 'ide'
+ instance = objects.Instance(**inst_ref)
+ instance['image_ref'] = '70a599e0-31e7-49b7-b260-868f221a761e'
+ return instance
+
+ called = {'count': 0}
+ self.fake_img = {'id': '70a599e0-31e7-49b7-b260-868f221a761e',
+ 'name': 'myfakeimage',
+ 'created_at': '',
+ 'updated_at': '',
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'properties': {'hw_disk_bus': 'ide'}}
+
+ instance = _get_inst()
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ network_info = _fake_network_info(self.stubs, 1)
+ block_device_info = None
+ image_service_mock = mock.Mock()
+ image_service_mock.show.return_value = self.fake_img
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_destroy', return_value=None),
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_create_domain_and_network'),
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=flavor),
+ mock.patch.object(objects.Instance, 'save')):
+ conn.get_info = fake_get_info
+ conn._get_instance_disk_info = _check_xml_bus
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
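+ # Without image metadata in system_metadata, _check_xml_bus falls
+ # back to the fake image's hw_disk_bus property for the expected bus.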
+ instance = _get_inst(with_meta=False)
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
+ def _test_clean_shutdown(self, seconds_to_shutdown,
+ timeout, retry_interval,
+ shutdown_attempts, succeeds):
+ self.stubs.Set(time, 'sleep', lambda x: None)
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ shutdown_count = []
+
+ def count_shutdowns():
+ shutdown_count.append("shutdown")
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.shutdown().WithSideEffects(count_shutdowns)
+
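+ # Keep reporting RUNNING for min(seconds_to_shutdown, timeout) polls,
+ # re-issuing shutdown() every retry_interval polls.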
+ retry_countdown = retry_interval
+ for x in xrange(min(seconds_to_shutdown, timeout)):
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ if retry_countdown == 0:
+ mock_domain.shutdown().WithSideEffects(count_shutdowns)
+ retry_countdown = retry_interval
+ else:
+ retry_countdown -= 1
+
+ if seconds_to_shutdown < timeout:
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_create_domain(**kwargs):
+ self.reboot_create_called = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_create_domain', fake_create_domain)
+ result = conn._clean_shutdown(instance, timeout, retry_interval)
+
+ self.assertEqual(succeeds, result)
+ self.assertEqual(shutdown_attempts, len(shutdown_count))
+
+ def test_clean_shutdown_first_time(self):
+ self._test_clean_shutdown(seconds_to_shutdown=2,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=True)
+
+ def test_clean_shutdown_with_retry(self):
+ self._test_clean_shutdown(seconds_to_shutdown=4,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=True)
+
+ def test_clean_shutdown_failure(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=False)
+
+ def test_clean_shutdown_no_wait(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=0,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=False)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._attach_sriov_ports(self.context, instance, domain, network_info)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FakeVirtDomain, 'attachDevice')
+ @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_attach_sriov_ports_with_info_cache(self,
+ mock_get_image_metadata,
+ mock_ID,
+ mock_attachDevice,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache = objects.InstanceInfoCache(
+ network_info=network_info)
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
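+ # network_info is passed as None, so the driver must fall back to the
+ # instance's info_cache to find the SR-IOV (direct) ports.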
+ conn._attach_sriov_ports(self.context, instance, domain, None)
+ mock_get_image_metadata.assert_called_once_with(self.context,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_attachDevice.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_has_min_version', return_value=True)
+ @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
+ @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
+ def test_detach_sriov_ports(self,
+ mock_get_image_metadata,
+ mock_detachDeviceFlags,
+ mock_has_min_version,
+ mock_flavor):
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ network_info = _fake_network_info(self.stubs, 1)
+ network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ instance.info_cache = objects.InstanceInfoCache(
+ network_info=network_info)
+
+ domain = FakeVirtDomain()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ conn._detach_sriov_ports(instance, domain)
+ mock_get_image_metadata.assert_called_once_with(mock.ANY,
+ conn._image_api, instance['image_ref'], instance)
+ self.assertTrue(mock_detachDeviceFlags.called)
+
+ def test_resume(self):
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+ instance = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+ block_device_info = None
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_get_existing_domain_xml',
+ return_value=dummyxml),
+ mock.patch.object(conn, '_create_domain_and_network',
+ return_value='fake_dom'),
+ mock.patch.object(conn, '_attach_pci_devices'),
+ mock.patch.object(pci_manager, 'get_instance_pci_devs',
+ return_value='fake_pci_devs'),
+ ) as (_get_existing_domain_xml, _create_domain_and_network,
+ _attach_pci_devices, get_instance_pci_devs):
+ conn.resume(self.context, instance, network_info,
+ block_device_info)
+ _get_existing_domain_xml.assert_has_calls([mock.call(instance,
+ network_info, block_device_info)])
+ _create_domain_and_network.assert_has_calls([mock.call(
+ self.context, dummyxml,
+ instance, network_info,
+ block_device_info=block_device_info,
+ vifs_already_plugged=True)])
+ _attach_pci_devices.assert_has_calls([mock.call('fake_dom',
+ 'fake_pci_devs')])
+
+ def test_destroy_undefines(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy()
+ mock.undefineFlags(1).AndReturn(1)
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_info(instance_name):
+ return {'state': power_state.SHUTDOWN, 'id': -1}
+
+ def fake_delete_instance_files(instance):
+ return None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ self.stubs.Set(conn, '_delete_instance_files',
+ fake_delete_instance_files)
+
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ conn.destroy(self.context, instance, [])
+
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ def test_cleanup_rbd(self, mock_driver):
+ driver = mock_driver.return_value
+ driver.cleanup_volumes = mock.Mock()
+ fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._cleanup_rbd(fake_instance)
+
+ driver.cleanup_volumes.assert_called_once_with(fake_instance)
+
+ def test_destroy_undefines_no_undefine_flags(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy()
+ mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
+ mock.undefine()
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_info(instance_name):
+ return {'state': power_state.SHUTDOWN, 'id': -1}
+
+ def fake_delete_instance_files(instance):
+ return None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ self.stubs.Set(conn, '_delete_instance_files',
+ fake_delete_instance_files)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ conn.destroy(self.context, instance, [])
+
+ def test_destroy_undefines_no_attribute_with_managed_save(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy()
+ mock.undefineFlags(1).AndRaise(AttributeError())
+ mock.hasManagedSaveImage(0).AndReturn(True)
+ mock.managedSaveRemove(0)
+ mock.undefine()
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_info(instance_name):
+ return {'state': power_state.SHUTDOWN, 'id': -1}
+
+ def fake_delete_instance_files(instance):
+ return None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ self.stubs.Set(conn, '_delete_instance_files',
+ fake_delete_instance_files)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ conn.destroy(self.context, instance, [])
+
+ def test_destroy_undefines_no_attribute_no_managed_save(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy()
+ mock.undefineFlags(1).AndRaise(AttributeError())
+ mock.hasManagedSaveImage(0).AndRaise(AttributeError())
+ mock.undefine()
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_info(instance_name):
+ return {'state': power_state.SHUTDOWN, 'id': -1}
+
+ def fake_delete_instance_files(instance):
+ return None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ self.stubs.Set(conn, '_delete_instance_files',
+ fake_delete_instance_files)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ conn.destroy(self.context, instance, [])
+
+ def test_destroy_timed_out(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_error_code(self):
+ return libvirt.VIR_ERR_OPERATION_TIMEOUT
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(libvirt.libvirtError, 'get_error_code',
+ fake_get_error_code)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.assertRaises(exception.InstancePowerOffFailure,
+ conn.destroy, self.context, instance, [])
+
+ def test_private_destroy_not_found(self):
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ mock = self.mox.CreateMock(libvirt.virDomain)
+ mock.ID()
+ mock.destroy().AndRaise(ex)
+ mock.info().AndRaise(ex)
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ # NOTE(vish): verifies destroy doesn't raise if the instance disappears
+ conn._destroy(instance)
+
+ def test_undefine_domain_with_not_found_instance(self):
+ def fake_lookup(instance_name):
+ raise libvirt.libvirtError("not found")
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+ self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
+ libvirt.libvirtError.get_error_code().AndReturn(
+ libvirt.VIR_ERR_NO_DOMAIN)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = {'name': 'test'}
+
+ # NOTE(wenjianhn): verifies undefine doesn't raise if the
+ # instance disappears
+ conn._undefine_domain(instance)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_disk_over_committed_size_total(self, mock_list):
+ # Ensure the over-committed disk size is summed across all
+ # instance domains.
+ class DiagFakeDomain(object):
+ def __init__(self, name):
+ self._name = name
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return self._name
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ def XMLDesc(self, flags):
+ return "<domain/>"
+
+ mock_list.return_value = [
+ DiagFakeDomain("instance0000001"),
+ DiagFakeDomain("instance0000002")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ fake_disks = {'instance0000001':
+ [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size': '83886080',
+ 'over_committed_disk_size': '10653532160'}],
+ 'instance0000002':
+ [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size': '10737418240',
+ 'over_committed_disk_size': '0'}]}
+
+ def get_info(instance_name, xml, **kwargs):
+ return jsonutils.dumps(fake_disks.get(instance_name))
+
+ with mock.patch.object(drvr,
+ "_get_instance_disk_info") as mock_info:
+ mock_info.side_effect = get_info
+
+ result = drvr._get_disk_over_committed_size_total()
+ self.assertEqual(result, 10653532160)
+ mock_list.assert_called_with()
+ self.assertTrue(mock_info.called)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_disk_over_committed_size_total_eperm(self, mock_list):
+ # Ensure disks that raise EACCES are skipped when summing the
+ # over-committed disk size.
+ class DiagFakeDomain(object):
+ def __init__(self, name):
+ self._name = name
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return self._name
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ def XMLDesc(self, flags):
+ return "<domain/>"
+
+ mock_list.return_value = [
+ DiagFakeDomain("instance0000001"),
+ DiagFakeDomain("instance0000002")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ fake_disks = {'instance0000001':
+ [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size': '83886080',
+ 'over_committed_disk_size': '10653532160'}],
+ 'instance0000002':
+ [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size': '10737418240',
+ 'over_committed_disk_size': '21474836480'}]}
+
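+ # The first domain raises EACCES, so only the second domain's
+ # over-committed size should be counted in the total.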
+ def side_effect(name, dom):
+ if name == 'instance0000001':
+ raise OSError(errno.EACCES, 'Permission denied')
+ if name == 'instance0000002':
+ return jsonutils.dumps(fake_disks.get(name))
+ get_disk_info = mock.Mock()
+ get_disk_info.side_effect = side_effect
+ drvr._get_instance_disk_info = get_disk_info
+
+ result = drvr._get_disk_over_committed_size_total()
+ self.assertEqual(21474836480, result)
+ mock_list.assert_called_with()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_list_instance_domains",
+ return_value=[mock.MagicMock(name='foo')])
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
+ side_effect=exception.VolumeBDMPathNotFound(path='bar'))
+ def test_disk_over_committed_size_total_bdm_not_found(self,
+ mock_get_disk_info,
+ mock_list_domains):
+ # Tests that we handle VolumeBDMPathNotFound gracefully.
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(0, drvr._get_disk_over_committed_size_total())
+
+ def test_cpu_info(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigCPU()
+ cpu.model = "Opteron_G4"
+ cpu.vendor = "AMD"
+ cpu.arch = arch.X86_64
+
+ cpu.cores = 2
+ cpu.threads = 1
+ cpu.sockets = 4
+
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = vm_mode.HVM
+ guest.arch = arch.X86_64
+ guest.domtype = ["kvm"]
+ caps.guests.append(guest)
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = vm_mode.HVM
+ guest.arch = arch.I686
+ guest.domtype = ["kvm"]
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = {"vendor": "AMD",
+ "features": ["extapic", "3dnow"],
+ "model": "Opteron_G4",
+ "arch": arch.X86_64,
+ "topology": {"cores": 2, "threads": 1, "sockets": 4}}
+ got = jsonutils.loads(conn._get_cpu_info())
+ self.assertEqual(want, got)
+
+ def test_get_pcidev_info(self):
+
+ def fake_nodeDeviceLookupByName(name):
+ return FakeNodeDevice(_fake_NodeDevXml[name])
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
+ fake_nodeDeviceLookupByName
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actualvf = conn._get_pcidev_info("pci_0000_04_00_3")
+ expect_vf = {
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:00.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "label": 'label_8086_1521',
+ "dev_type": 'type-PF',
+ }
+
+ self.assertEqual(actualvf, expect_vf)
+ actualvf = conn._get_pcidev_info("pci_0000_04_10_7")
+ expect_vf = {
+ "dev_id": "pci_0000_04_10_7",
+ "address": "0000:04:10.7",
+ "product_id": '1520',
+ "vendor_id": '8086',
+ "label": 'label_8086_1520',
+ "dev_type": 'type-VF',
+ "phys_function": '0000:04:00.3',
+ }
+
+ self.assertEqual(actualvf, expect_vf)
+
+ def test_pci_device_assignable(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: True)
+
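+ # Physical functions (type-PF) are never reported as assignable, even
+ # when the whitelist filter accepts them.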
+ fake_dev = {'dev_type': 'type-PF'}
+ self.assertFalse(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-VF'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-PCI'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+
+ def test_list_devices_not_supported(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Handle just the NO_SUPPORT error
+ not_supported_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'this function is not supported by the connection driver:'
+ ' virNodeNumOfDevices',
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+
+ with mock.patch.object(conn._conn, 'listDevices',
+ side_effect=not_supported_exc):
+ self.assertEqual('[]', conn._get_pci_passthrough_devices())
+
+ # We cache the 'not supported' status to avoid emitting too many log
+ # messages. Clear the cached value to test the other exception case.
+ del conn._list_devices_supported
+
+ # Other errors should not be caught
+ other_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'other exc',
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+
+ with mock.patch.object(conn._conn, 'listDevices',
+ side_effect=other_exc):
+ self.assertRaises(libvirt.libvirtError,
+ conn._get_pci_passthrough_devices)
+
+ def test_get_pci_passthrough_devices(self):
+
+ def fakelistDevices(caps, fakeargs=0):
+ return ['pci_0000_04_00_3', 'pci_0000_04_10_7']
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
+
+ def fake_nodeDeviceLookupByName(name):
+ return FakeNodeDevice(_fake_NodeDevXml[name])
+
+ libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
+ fake_nodeDeviceLookupByName
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: x)
+ actjson = conn._get_pci_passthrough_devices()
+
+ expectvfs = [
+ {
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:10.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "dev_type": 'type-PF',
+ "phys_function": None},
+ {
+ "dev_id": "pci_0000_04_10_7",
+ "domain": 0,
+ "address": "0000:04:10.7",
+ "product_id": '1520',
+ "vendor_id": '8086',
+ "dev_type": 'type-VF',
+ "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
+ }
+ ]
+
+ actualvfs = jsonutils.loads(actjson)
+ for key in actualvfs[0].keys():
+ if key not in ['phys_function', 'virt_functions', 'label']:
+ self.assertEqual(actualvfs[0][key], expectvfs[1][key])
+
+ def _fake_caps_numa_topology(self):
+ topology = vconfig.LibvirtConfigCapsNUMATopology()
+
+ cell_0 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_0.id = 0
+ cell_0.memory = 1024 * units.Ki
+ cpu_0_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_0.id = 0
+ cpu_0_0.socket_id = 0
+ cpu_0_0.core_id = 0
+ cpu_0_0.sibling = 0
+ cpu_0_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_1.id = 1
+ cpu_0_1.socket_id = 0
+ cpu_0_1.core_id = 1
+ cpu_0_1.sibling = 1
+ cell_0.cpus = [cpu_0_0, cpu_0_1]
+
+ cell_1 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_1.id = 1
+ cell_1.memory = 1024 * units.Ki
+ cpu_1_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_0.id = 2
+ cpu_1_0.socket_id = 1
+ cpu_1_0.core_id = 0
+ cpu_1_0.sibling = 2
+ cpu_1_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_1.id = 3
+ cpu_1_1.socket_id = 1
+ cpu_1_1.core_id = 1
+ cpu_1_1.sibling = 3
+ cell_1.cpus = [cpu_1_0, cpu_1_1]
+
+ topology.cells = [cell_0, cell_1]
+ return topology
+
+ def test_get_host_numa_topology(self):
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ expected_topo_dict = {'cells': [
+ {'cpus': '0,1', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 0},
+ {'cpus': '3', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1}]}
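+ # CPU 2 is filtered out of cell 1 because the mocked vcpu_pin_set
+ # only contains CPUs 0, 1 and 3.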
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, '_get_host_capabilities', return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3]))
+ ):
+ got_topo = conn._get_host_numa_topology()
+ got_topo_dict = got_topo._to_dict()
+ self.assertThat(
+ expected_topo_dict, matchers.DictMatches(got_topo_dict))
+
+ def test_get_host_numa_topology_empty(self):
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.topology = None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(conn, '_get_host_capabilities',
+ return_value=caps)
+ ) as (has_min_version, get_caps):
+ self.assertIsNone(conn._get_host_numa_topology())
+ get_caps.assert_called_once_with()
+
+ def test_get_host_numa_topology_not_supported(self):
+ # Tests that libvirt isn't new enough to support numa topology.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with mock.patch.object(conn, '_has_min_version', return_value=False):
+ self.assertIsNone(conn._get_host_numa_topology())
+
+ def test_diagnostic_vcpus_exception(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ raise libvirt.libvirtError('vcpus missing')
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_blockstats_exception(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ raise libvirt.libvirtError('blockStats missing')
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_interfacestats_exception(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ raise libvirt.libvirtError('interfaceStat missing')
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ }
+ self.assertEqual(actual, expect)
+
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_memorystats_exception(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ raise libvirt.libvirtError('memoryStats missing')
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ def test_diagnostic_full(self):
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <interface type='network'>
+ <mac address='52:54:00:a4:38:38'/>
+ <source network='default'/>
+ <target dev='vnet0'/>
+ </interface>
+ </devices>
+ </domain>
+ """
+
+ class DiagFakeDomain(FakeVirtDomain):
+
+ def __init__(self):
+ super(DiagFakeDomain, self).__init__(fake_xml=xml)
+
+ def vcpus(self):
+ return ([(0, 1, 15340000000L, 0),
+ (1, 1, 1640000000L, 0),
+ (2, 1, 3040000000L, 0),
+ (3, 1, 1420000000L, 0)],
+ [(True, False),
+ (True, False),
+ (True, False),
+ (True, False)])
+
+ def blockStats(self, path):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ def interfaceStats(self, path):
+ return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
+
+ def memoryStats(self):
+ return {'actual': 220160L, 'rss': 200164L}
+
+ def maxMemory(self):
+ return 280160L
+
+ def fake_lookup_name(name):
+ return DiagFakeDomain()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ actual = conn.get_diagnostics({"name": "testvirt"})
+ expect = {'cpu0_time': 15340000000L,
+ 'cpu1_time': 1640000000L,
+ 'cpu2_time': 3040000000L,
+ 'cpu3_time': 1420000000L,
+ 'vda_read': 688640L,
+ 'vda_read_req': 169L,
+ 'vda_write': 0L,
+ 'vda_write_req': 0L,
+ 'vda_errors': -1L,
+ 'vdb_read': 688640L,
+ 'vdb_read_req': 169L,
+ 'vdb_write': 0L,
+ 'vdb_write_req': 0L,
+ 'vdb_errors': -1L,
+ 'memory': 280160L,
+ 'memory-actual': 220160L,
+ 'memory-rss': 200164L,
+ 'vnet0_rx': 4408L,
+ 'vnet0_rx_drop': 0L,
+ 'vnet0_rx_errors': 0L,
+ 'vnet0_rx_packets': 82L,
+ 'vnet0_tx': 0L,
+ 'vnet0_tx_drop': 0L,
+ 'vnet0_tx_errors': 0L,
+ 'vnet0_tx_packets': 0L,
+ }
+ self.assertEqual(actual, expect)
+
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_failing_vcpu_count(self, mock_list):
+ """Domain can fail to return the vcpu description in case it's
+ just starting up or shutting down. Make sure None is handled
+ gracefully.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self, vcpus):
+ self._vcpus = vcpus
+
+ def vcpus(self):
+ if self._vcpus is None:
+ raise libvirt.libvirtError("fake-error")
+ else:
+ return ([1] * self._vcpus, [True] * self._vcpus)
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return "instance000001"
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ mock_list.return_value = [
+ DiagFakeDomain(None), DiagFakeDomain(5)]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.assertEqual(5, drvr._get_vcpu_used())
+ mock_list.assert_called_with()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_failing_vcpu_count_none(self, mock_list):
+ """Domain will return zero if the current number of vcpus used
+ is None. This is in case of VM state starting up or shutting
+ down. None type returned is counted as zero.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self):
+ pass
+
+ def vcpus(self):
+ return None
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return "instance000001"
+
+ mock_list.return_value = [DiagFakeDomain()]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(0, drvr._get_vcpu_used())
+ mock_list.assert_called_with()
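+
+ # Illustrative only, not part of the driver: a minimal standalone
+ # sketch of the defensive aggregation the two tests above encode.
+ # Sum vcpus across all domains, treating a libvirtError or a None
+ # result from vcpus() as zero for that domain.
+ def _vcpu_used_sketch(domains):
+ total = 0
+ for dom in domains:
+ try:
+ vcpu_info = dom.vcpus()
+ except libvirt.libvirtError:
+ continue
+ if vcpu_info is not None:
+ total += len(vcpu_info[0])
+ return total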
+
+ def test_get_memory_used_normal(self):
+ m = mock.mock_open(read_data="""
+MemTotal: 16194180 kB
+MemFree: 233092 kB
+MemAvailable: 8892356 kB
+Buffers: 567708 kB
+Cached: 8362404 kB
+SwapCached: 0 kB
+Active: 8381604 kB
+""")
+ with contextlib.nested(
+ mock.patch("__builtin__.open", m, create=True),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_conn"),
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_conn, mock_platform):
+ mock_conn.getInfo.return_value = [
+ arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.assertEqual(6866, drvr._get_memory_mb_used())
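+
+ # Where 6866 comes from (a sanity check on the fake /proc/meminfo
+ # above, assuming used = MemTotal - (MemFree + Buffers + Cached)):
+ # 16194180 - (233092 + 567708 + 8362404) = 7030976 kB
+ # 7030976 kB // 1024 = 6866 MB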
+
+ def test_get_memory_used_xen(self):
+ self.flags(virt_type='xen', group='libvirt')
+
+ class DiagFakeDomain(object):
+ def __init__(self, id, memmb):
+ self.id = id
+ self.memmb = memmb
+
+ def info(self):
+ return [0, 0, self.memmb * 1024]
+
+ def ID(self):
+ return self.id
+
+ def name(self):
+ return "instance000001"
+
+ def UUIDString(self):
+ return str(uuid.uuid4())
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ m = mock.mock_open(read_data="""
+MemTotal: 16194180 kB
+MemFree: 233092 kB
+MemAvailable: 8892356 kB
+Buffers: 567708 kB
+Cached: 8362404 kB
+SwapCached: 0 kB
+Active: 8381604 kB
+""")
+
+ with contextlib.nested(
+ mock.patch("__builtin__.open", m, create=True),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_conn"),
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_list, mock_conn, mock_platform):
+ mock_list.return_value = [
+ DiagFakeDomain(0, 15814),
+ DiagFakeDomain(1, 750),
+ DiagFakeDomain(2, 1042)]
+ mock_conn.getInfo.return_value = [
+ arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
+
+ self.assertEqual(8657, drvr._get_memory_mb_used())
+ mock_list.assert_called_with(only_guests=False)
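+
+ # Where 8657 comes from (a sanity check, assuming the xen path counts
+ # dom0 as its info() memory minus MemFree + Buffers + Cached, plus the
+ # full memory of every guest domain, all in kB):
+ # dom0: 15814 * 1024 - (233092 + 567708 + 8362404) = 7030332 kB
+ # guests: (750 + 1042) * 1024 = 1835008 kB
+ # (7030332 + 1835008) kB // 1024 = 8657 MB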
+
+ def test_get_instance_capabilities(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ def get_host_capabilities_stub(self):
+ caps = vconfig.LibvirtConfigCaps()
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.X86_64
+ guest.domtype = ['kvm', 'qemu']
+ caps.guests.append(guest)
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.I686
+ guest.domtype = ['kvm']
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = [(arch.X86_64, 'kvm', 'hvm'),
+ (arch.X86_64, 'qemu', 'hvm'),
+ (arch.I686, 'kvm', 'hvm')]
+ got = conn._get_instance_capabilities()
+ self.assertEqual(want, got)
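+ # Each guest arch from the host capabilities is expanded once per
+ # supported domain type, which is why x86_64 yields both a kvm and a
+ # qemu entry while i686 yields only kvm.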
+
+ def test_event_dispatch(self):
+ # Validate that the libvirt self-pipe for forwarding
+ # events between threads is working sanely
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+
+ conn._init_events_pipe()
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+ conn._queue_event(event1)
+ conn._queue_event(event2)
+ conn._dispatch_events()
+
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ conn._queue_event(event3)
+ conn._queue_event(event4)
+ conn._dispatch_events()
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_lifecycle(self):
+ # Validate that libvirt events are correctly translated
+ # to Nova events
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+ conn._init_events_pipe()
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ dom = FakeVirtDomain(fake_dom_xml,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+
+ conn._event_lifecycle_callback(conn._conn,
+ dom,
+ libvirt.VIR_DOMAIN_EVENT_STOPPED,
+ 0,
+ conn)
+ conn._dispatch_events()
+ self.assertEqual(len(got_events), 1)
+ self.assertIsInstance(got_events[0], virtevent.LifecycleEvent)
+ self.assertEqual(got_events[0].uuid,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+ self.assertEqual(got_events[0].transition,
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'emit_event')
+ def test_event_emit_delayed_call_now(self, emit_event_mock):
+ self.flags(virt_type="kvm", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._event_emit_delayed(None)
+ emit_event_mock.assert_called_once_with(None)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed(self, spawn_after_mock):
+ CONF.set_override("virt_type", "xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ event = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ spawn_after_mock.assert_called_once_with(15, conn.emit_event, event)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
+ self.flags(virt_type="xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ conn._events_delayed[uuid] = None
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ self.assertFalse(spawn_after_mock.called)
+
+ def test_event_delayed_cleanup(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STARTED)
+ gt_mock = mock.Mock()
+ conn._events_delayed[uuid] = gt_mock
+ conn._event_delayed_cleanup(event)
+ gt_mock.cancel.assert_called_once_with()
+ self.assertNotIn(uuid, conn._events_delayed.keys())
+
+ def test_set_cache_mode(self):
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'directsync')
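+
+ # disk_cachemodes entries are 'source_type=mode' pairs, so
+ # 'file=directsync' means any disk whose source_type is 'file'
+ # should get cache mode 'directsync'.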
+
+ def test_set_cache_mode_invalid_mode(self):
+ self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertIsNone(fake_conf.driver_cache)
+
+ def test_set_cache_mode_invalid_object(self):
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuest()
+
+ fake_conf.driver_cache = 'fake'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'fake')
+
+ def _test_shared_storage_detection(self, is_same):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('bar')
+ utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
+ os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
+ if is_same:
+ os.unlink(mox.IgnoreArg())
+ else:
+ utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
+ self.mox.ReplayAll()
+ return conn._is_storage_shared_with('foo', '/path')
+
+ def test_shared_storage_detection_same_host(self):
+ self.assertTrue(self._test_shared_storage_detection(True))
+
+ def test_shared_storage_detection_different_host(self):
+ self.assertFalse(self._test_shared_storage_detection(False))
+
+ def test_shared_storage_detection_easy(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(os, 'unlink')
+ conn.get_host_ip_addr().AndReturn('foo')
+ self.mox.ReplayAll()
+ self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
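+
+ # When the destination resolves to our own IP no ssh probe is
+ # needed; the storage is trivially shared with ourselves.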
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_domain_info_with_more_return(self, lookup_mock):
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ dom_mock = mock.MagicMock()
+ dom_mock.info.return_value = [
+ 1, 2048, 737, 8, 12345, 888888
+ ]
+ dom_mock.ID.return_value = mock.sentinel.instance_id
+ lookup_mock.return_value = dom_mock
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_info(instance)
+ expect = {'state': 1,
+ 'max_mem': 2048,
+ 'mem': 737,
+ 'num_cpu': 8,
+ 'cpu_time': 12345,
+ 'id': mock.sentinel.instance_id}
+ self.assertEqual(expect, info)
+ dom_mock.info.assert_called_once_with()
+ dom_mock.ID.assert_called_once_with()
+ lookup_mock.assert_called_once_with(instance['name'])
+
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ @mock.patch.object(encodeutils, 'safe_decode')
+ def test_create_domain(self, mock_safe_decode, mock_get_inst_path):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
+ self.assertEqual(2, mock_safe_decode.call_count)
+
+ @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
+ mock_setup_container, mock_get_info, mock_clean):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.return_value = {'state': power_state.RUNNING}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
+ @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
+ mock_ensure_tree, mock_setup_container,
+ mock_chown, mock_get_info, mock_clean):
+ self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
+ gid_maps=["0:1000:100"], group='libvirt')
+
+ def chown_side_effect(path, id_maps):
+ self.assertEqual('/tmp/rootfs', path)
+ self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
+ self.assertEqual(0, id_maps[0].start)
+ self.assertEqual(1000, id_maps[0].target)
+ self.assertEqual(100, id_maps[0].count)
+ self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
+ self.assertEqual(0, id_maps[1].start)
+ self.assertEqual(1000, id_maps[1].target)
+ self.assertEqual(100, id_maps[1].count)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_chown.side_effect = chown_side_effect
+ mock_get_info.return_value = {'state': power_state.RUNNING}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc_not_running(self, mock_get_inst_path,
+ mock_ensure_tree,
+ mock_setup_container,
+ mock_get_info, mock_teardown):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.return_value = {'state': power_state.SHUTDOWN}
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
+ conn._create_domain_and_network(self.context, 'xml',
+ mock_instance, [])
+
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_not_called()
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ teardown_call = mock.call(container_dir='/tmp/rootfs')
+ mock_teardown.assert_has_calls([teardown_call])
+
+ def test_create_domain_define_xml_fails(self):
+ """Tests that the xml is logged when defining the domain fails."""
+ fake_xml = "<test>this is a test</test>"
+
+ def fake_defineXML(xml):
+ self.assertEqual(fake_xml, xml)
+ raise libvirt.libvirtError('virDomainDefineXML() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock(defineXML=fake_defineXML)
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_with_flags_fails(self):
+ """Tests that the xml is logged when creating the domain with flags
+ fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_createWithFlags(launch_flags):
+ raise libvirt.libvirtError('virDomainCreateWithFlags() failed')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertRaises(libvirt.libvirtError, conn._create_domain,
+ domain=fake_domain)
+ self.assertTrue(self.log_error_called)
+
+ def test_create_domain_enable_hairpin_fails(self):
+ """Tests that the xml is logged when enabling hairpin mode for the
+ domain fails.
+ """
+ fake_xml = "<test>this is a test</test>"
+ fake_domain = FakeVirtDomain(fake_xml)
+
+ def fake_enable_hairpin(launch_flags):
+ raise processutils.ProcessExecutionError('error')
+
+ self.log_error_called = False
+
+ def fake_error(msg, *args):
+ self.log_error_called = True
+ self.assertIn(fake_xml, msg % args)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
+
+ self.create_fake_libvirt_mock()
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ conn._create_domain,
+ domain=fake_domain,
+ power_on=False)
+ self.assertTrue(self.log_error_called)
+
+ def test_get_vnc_console(self):
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<graphics type='vnc' port='5900'/>"
+ "</devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ vnc_dict = conn.get_vnc_console(self.context, instance)
+ self.assertEqual(vnc_dict.port, '5900')
+
+ def test_get_vnc_console_unavailable(self):
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices></devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_get_spice_console(self):
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<graphics type='spice' port='5950'/>"
+ "</devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ spice_dict = conn.get_spice_console(self.context, instance)
+ self.assertEqual(spice_dict.port, '5950')
+
+ def test_get_spice_console_unavailable(self):
+ instance = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices></devices></domain>")
+
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ conn.get_spice_console, self.context, instance)
+
+ def test_detach_volume_with_instance_not_found(self):
+ # Test that detach_volume() does not raise an exception
+ # if the instance does not exist.
+
+ instance = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lookup_by_name',
+ side_effect=exception.InstanceNotFound(
+ instance_id=instance.name)),
+ mock.patch.object(conn, '_disconnect_volume')
+ ) as (_lookup_by_name, _disconnect_volume):
+ connection_info = {'driver_volume_type': 'fake'}
+ conn.detach_volume(connection_info, instance, '/dev/sda')
+ _lookup_by_name.assert_called_once_with(instance.name)
+ _disconnect_volume.assert_called_once_with(connection_info,
+ 'sda')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_attach_detach_interface_get_config(self, method_name,
+ mock_flavor):
+ """Tests that the get_config() method is properly called in
+ attach_interface() and detach_interface().
+
+ method_name: either \"attach_interface\" or \"detach_interface\"
+ depending on the method to test.
+ """
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+
+ instance = objects.Instance(**self.test_instance)
+ mock_flavor.return_value = instance.get_flavor()
+ network_info = _fake_network_info(self.stubs, 1)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ if method_name == "attach_interface":
+ fake_image_meta = {'id': instance['image_ref']}
+ elif method_name == "detach_interface":
+ fake_image_meta = None
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ if method_name == "attach_interface":
+ self.mox.StubOutWithMock(conn.firewall_driver,
+ 'setup_basic_filtering')
+ conn.firewall_driver.setup_basic_filtering(instance, network_info)
+
+ expected = conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ instance.get_flavor(),
+ CONF.libvirt.virt_type)
+ self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
+ conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).\
+ AndReturn(expected)
+
+ self.mox.ReplayAll()
+
+ if method_name == "attach_interface":
+ conn.attach_interface(instance, fake_image_meta,
+ network_info[0])
+ elif method_name == "detach_interface":
+ conn.detach_interface(instance, network_info[0])
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_attach_interface_get_config(self, mock_lock):
+ """Tests that the get_config() method is properly called in
+ attach_interface().
+ """
+ mock_lock.return_value = threading.Semaphore()
+
+ self._test_attach_detach_interface_get_config("attach_interface")
+
+ def test_detach_interface_get_config(self):
+ """Tests that the get_config() method is properly called in
+ detach_interface().
+ """
+ self._test_attach_detach_interface_get_config("detach_interface")
+
+ def test_default_root_device_name(self):
+ instance = {'uuid': 'fake_instance'}
+ image_meta = {'id': 'fake'}
+ root_bdm = {'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake_id'}
+ self.flags(virt_type='fake_libvirt_type', group='libvirt')
+
+ self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
+ self.mox.StubOutWithMock(blockinfo, 'get_root_info')
+
+ blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
+ image_meta,
+ 'disk').InAnyOrder().\
+ AndReturn('virtio')
+ blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
+ image_meta,
+ 'cdrom').InAnyOrder().\
+ AndReturn('ide')
+ blockinfo.get_root_info('fake_libvirt_type',
+ image_meta, root_bdm,
+ 'virtio', 'ide').AndReturn({'dev': 'vda'})
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(conn.default_root_device_name(instance, image_meta,
+ root_bdm), '/dev/vda')
+
+ def test_default_device_names_for_instance(self):
+ instance = {'uuid': 'fake_instance'}
+ root_device_name = '/dev/vda'
+ ephemerals = [{'device_name': 'vdb'}]
+ swap = [{'device_name': 'vdc'}]
+ block_device_mapping = [{'device_name': 'vdc'}]
+ self.flags(virt_type='fake_libvirt_type', group='libvirt')
+
+ self.mox.StubOutWithMock(blockinfo, 'default_device_names')
+
+ blockinfo.default_device_names('fake_libvirt_type', mox.IgnoreArg(),
+ instance, root_device_name,
+ ephemerals, swap, block_device_mapping)
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.default_device_names_for_instance(instance, root_device_name,
+ ephemerals, swap,
+ block_device_mapping)
+
+ def test_is_supported_fs_format(self):
+ supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
+ disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertTrue(conn.is_supported_fs_format(fs))
+
+ supported_fs = ['', 'dummy']
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertFalse(conn.is_supported_fs_format(fs))
+
+ def test_hypervisor_hostname_caching(self):
+ # Make sure that the first hostname is always returned
+ class FakeConn(object):
+ def getHostname(self):
+ pass
+
+ def getLibVersion(self):
+ return 99999
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._wrapped_conn = FakeConn()
+ self.mox.StubOutWithMock(conn._wrapped_conn, 'getHostname')
+ conn._conn.getHostname().AndReturn('foo')
+ conn._conn.getHostname().AndReturn('bar')
+ self.mox.ReplayAll()
+ self.assertEqual('foo', conn._get_hypervisor_hostname())
+ self.assertEqual('foo', conn._get_hypervisor_hostname())
+
+ def test_get_connection_serial(self):
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call serially
+ get_conn_currency(driver)
+ get_conn_currency(driver)
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
+
+ def test_get_connection_concurrency(self):
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call concurrently
+ thr1 = eventlet.spawn(get_conn_currency, driver=driver)
+ thr2 = eventlet.spawn(get_conn_currency, driver=driver)
+
+ # let threads run
+ eventlet.sleep(0)
+
+ thr1.wait()
+ thr2.wait()
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
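+
+ # Even though the two accesses overlap, only one libvirt connection
+ # should be established and only one event callback registered; the
+ # second greenthread is expected to reuse the first connection.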
+
+ def test_post_live_migration_at_destination_with_block_device_info(self):
+ # Preparing mocks
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ self.resultXML = None
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_getLibVersion():
+ return 9011
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None, write_to_disk=False):
+ if image_meta is None:
+ image_meta = {}
+ conf = conn._get_guest_config(instance, network_info, image_meta,
+ disk_info, rescue, block_device_info)
+ self.resultXML = conf.to_xml()
+ return self.resultXML
+
+ def fake_lookup_name(instance_name):
+ return mock_domain
+
+ def fake_defineXML(xml):
+ return
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ </cpu>
+ """
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
+ getCapabilities=fake_getCapabilities,
+ getVersion=lambda: 1005001)
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance = objects.Instance(**instance_ref)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
+ libvirt_driver.LibvirtDriver._conn.getCapabilities = \
+ fake_getCapabilities
+ libvirt_driver.LibvirtDriver._conn.getVersion = lambda: 1005001
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.defineXML = fake_defineXML
+ libvirt_driver.LibvirtDriver._conn.baselineCPU = fake_baselineCPU
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn,
+ '_get_guest_xml',
+ fake_to_xml)
+ self.stubs.Set(conn,
+ '_lookup_by_name',
+ fake_lookup_name)
+ block_device_info = {'block_device_mapping':
+ driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'guest_format': None,
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'delete_on_termination': False}),
+ ])}
+ block_device_info['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'iscsi'})
+ with contextlib.nested(
+ mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'),
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=flavor),
+ mock.patch.object(objects.Instance, 'save')):
+ conn.post_live_migration_at_destination(
+ self.context, instance, network_info, True,
+ block_device_info=block_device_info)
+ self.assertIn('fake', self.resultXML)
+ self.assertTrue(
+ block_device_info['block_device_mapping'][0].save.called)
+
+ def test_create_propagates_exceptions(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid',
+ image_ref='my_fake_image')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_domain_setup_lxc'),
+ mock.patch.object(conn, '_create_domain_cleanup_lxc'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain',
+ side_effect=exception.NovaException),
+ mock.patch.object(conn, 'cleanup')):
+ self.assertRaises(exception.NovaException,
+ conn._create_domain_and_network,
+ self.context,
+ 'xml',
+ instance, None)
+
+ def test_create_without_pause(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ @contextlib.contextmanager
+ def fake_lxc_disk_handler(*args, **kwargs):
+ yield
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lxc_disk_handler',
+ side_effect=fake_lxc_disk_handler),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'cleanup')) as (
+ _handler, cleanup, firewall_driver, create, plug_vifs):
+ domain = conn._create_domain_and_network(self.context, 'xml',
+ instance, None)
+ self.assertEqual(0, create.call_args_list[0][1]['launch_flags'])
+ self.assertEqual(0, domain.resume.call_count)
+
+ def _test_create_with_network_events(self, neutron_failure=None,
+ power_on=True):
+ generated_events = []
+
+ def wait_timeout():
+ event = mock.MagicMock()
+ if neutron_failure == 'timeout':
+ raise eventlet.timeout.Timeout()
+ elif neutron_failure == 'error':
+ event.status = 'failed'
+ else:
+ event.status = 'completed'
+ return event
+
+ def fake_prepare(instance, event_name):
+ m = mock.MagicMock()
+ m.instance = instance
+ m.event_name = event_name
+ m.wait.side_effect = wait_timeout
+ generated_events.append(m)
+ return m
+
+ virtapi = manager.ComputeVirtAPI(mock.MagicMock())
+ prepare = virtapi._compute.instance_events.prepare_for_instance_event
+ prepare.side_effect = fake_prepare
+ conn = libvirt_driver.LibvirtDriver(virtapi, False)
+
+ instance = objects.Instance(id=1, uuid='fake-uuid')
+ vifs = [{'id': 'vif1', 'active': False},
+ {'id': 'vif2', 'active': False}]
+
+ @mock.patch.object(conn, 'plug_vifs')
+ @mock.patch.object(conn, 'firewall_driver')
+ @mock.patch.object(conn, '_create_domain')
+ @mock.patch.object(conn, 'cleanup')
+ def test_create(cleanup, create, fw_driver, plug_vifs):
+ domain = conn._create_domain_and_network(self.context, 'xml',
+ instance, vifs,
+ power_on=power_on)
+ plug_vifs.assert_called_with(instance, vifs)
+
+ flag = self._get_launch_flags(conn, vifs, power_on=power_on)
+ self.assertEqual(flag,
+ create.call_args_list[0][1]['launch_flags'])
+ if flag:
+ domain.resume.assert_called_once_with()
+ if neutron_failure and CONF.vif_plugging_is_fatal:
+ cleanup.assert_called_once_with(self.context,
+ instance, network_info=vifs,
+ block_device_info=None)
+
+ test_create()
+
+ if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
+ prepare.assert_has_calls([
+ mock.call(instance, 'network-vif-plugged-vif1'),
+ mock.call(instance, 'network-vif-plugged-vif2')])
+ for event in generated_events:
+ if neutron_failure and generated_events.index(event) != 0:
+ self.assertEqual(0, event.call_count)
+ elif (neutron_failure == 'error' and
+ not CONF.vif_plugging_is_fatal):
+ event.wait.assert_called_once_with()
+ else:
+ self.assertEqual(0, prepare.call_count)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron(self, is_neutron):
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_power_off(self,
+ is_neutron):
+ # Tests that we don't wait for events if we don't start the instance.
+ self._test_create_with_network_events(power_on=False)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_nowait(self, is_neutron):
+ self.flags(vif_plugging_timeout=0)
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_timeout(
+ self, is_neutron):
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_timeout(
+ self, is_neutron):
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_error(
+ self, is_neutron):
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_error(
+ self, is_neutron):
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=False)
+ def test_create_with_network_events_non_neutron(self, is_neutron):
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.volume.encryptors.get_encryption_metadata')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ mock_dom = mock.MagicMock()
+ mock_encryption_meta = mock.MagicMock()
+ get_encryption_metadata.return_value = mock_encryption_meta
+
+ fake_xml = """
+ <domain>
+ <name>instance-00000001</name>
+ <memory>1048576</memory>
+ <vcpu>1</vcpu>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw' cache='none'/>
+ <source file='/path/fake-volume1'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ fake_volume_id = "fake-volume-id"
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"access_mode": "rw",
+ "volume_id": fake_volume_id}}
+
+ def fake_getitem(*args, **kwargs):
+ fake_bdm = {'connection_info': connection_info,
+ 'mount_device': '/dev/vda'}
+ return fake_bdm.get(args[0])
+
+ mock_volume = mock.MagicMock()
+ mock_volume.__getitem__.side_effect = fake_getitem
+ bdi = {'block_device_mapping': [mock_volume]}
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_get_volume_encryptor'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver,
+ 'prepare_instance_filter'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
+ ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
+ prepare_instance_filter, create_domain, apply_instance_filter):
+ create_domain.return_value = mock_dom
+
+ domain = conn._create_domain_and_network(self.context, fake_xml,
+ instance, network_info,
+ block_device_info=bdi)
+
+ get_encryption_metadata.assert_called_once_with(self.context,
+ conn._volume_api, fake_volume_id, connection_info)
+ get_volume_encryptor.assert_called_once_with(connection_info,
+ mock_encryption_meta)
+ plug_vifs.assert_called_once_with(instance, network_info)
+ setup_basic_filtering.assert_called_once_with(instance,
+ network_info)
+ prepare_instance_filter.assert_called_once_with(instance,
+ network_info)
+ flags = self._get_launch_flags(conn, network_info)
+ create_domain.assert_called_once_with(fake_xml, instance=instance,
+ launch_flags=flags,
+ power_on=True)
+ self.assertEqual(mock_dom, domain)
+
+ def test_get_guest_storage_config(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["default_swap_device"] = None
+ instance = objects.Instance(**test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ conn_info = {'driver_volume_type': 'fake', 'data': {}}
+ bdi = {'block_device_mapping':
+ driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vdc'})
+ ])}
+ bdm = bdi['block_device_mapping'][0]
+ bdm['connection_info'] = conn_info
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance, bdi)
+ mock_conf = mock.MagicMock(source_path='fake')
+
+ with contextlib.nested(
+ mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
+ 'save'),
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_config',
+ return_value=mock_conf),
+ mock.patch.object(conn, '_set_cache_mode')
+ ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
+ devices = conn._get_guest_storage_config(instance, None,
+ disk_info, False, bdi, flavor)
+
+ self.assertEqual(3, len(devices))
+ self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
+ self.assertIsNone(instance.default_swap_device)
+ connect_volume.assert_called_with(bdm['connection_info'],
+ {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
+ get_volume_config.assert_called_with(bdm['connection_info'],
+ {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
+ self.assertEqual(1, volume_save.call_count)
+ self.assertEqual(3, set_cache_mode.call_count)
+
+ def test_get_neutron_events(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+ events = conn._get_neutron_events(network_info)
+ self.assertEqual([('network-vif-plugged', '1')], events)
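+ # Only the VIF that is not yet active ('1') should generate a
+ # network-vif-plugged event to wait on; the already-active VIF
+ # ('2') is skipped.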
+
+ def test_unplug_vifs_ignores_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ conn._unplug_vifs('inst', [1], ignore_errors=True)
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ def test_unplug_vifs_reports_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ self.assertRaises(exception.AgentError,
+ conn.unplug_vifs, 'inst', [1])
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ conn.firewall_driver = mock.Mock()
+ conn._disconnect_volume = mock.Mock()
+ fake_inst = {'name': 'foo'}
+ fake_bdms = [{'connection_info': 'foo',
+ 'mount_device': None}]
+ with mock.patch('nova.virt.driver'
+ '.block_device_info_get_mapping',
+ return_value=fake_bdms):
+ conn.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
+ self.assertTrue(conn._disconnect_volume.called)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ fake_inst = {'name': 'foo'}
+ with mock.patch.object(conn._conn, 'lookupByName') as lookup:
+ lookup.return_value = fake_inst
+ # NOTE(danms): Make unplug cause us to bail early, since
+ # we only care about how it was called
+ unplug.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ conn.cleanup, 'ctxt', fake_inst, 'netinfo')
+ unplug.assert_called_once_with(fake_inst, 'netinfo', True)
+
+ @mock.patch('nova.virt.driver.block_device_info_get_mapping')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_get_serial_ports_from_instance')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_serial_console_enabled(
+ self, undefine, get_ports,
+ block_device_info_get_mapping):
+ self.flags(enabled="True", group='serial_console')
+ instance = 'i1'
+ network_info = {}
+ bdm_info = {}
+ firewall_driver = mock.MagicMock()
+
+ get_ports.return_value = iter([('127.0.0.1', 10000)])
+ block_device_info_get_mapping.return_value = ()
+
+ # We want to ensure undefine_domain is only called after the
+ # serial ports have been looked up from the still-defined domain.
+ def undefine_domain(instance):
+ get_ports.side_effect = Exception("domain undefined")
+ undefine.side_effect = undefine_domain
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ conn.firewall_driver = firewall_driver
+ conn.cleanup(
+ 'ctx', instance, network_info,
+ block_device_info=bdm_info,
+ destroy_disks=False, destroy_vifs=False)
+
+ get_ports.assert_called_once_with(instance)
+ undefine.assert_called_once_with(instance)
+ firewall_driver.unfilter_instance.assert_called_once_with(
+ instance, network_info=network_info)
+ block_device_info_get_mapping.assert_called_once_with(bdm_info)
+
+ def test_swap_volume(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with mock.patch.object(drvr._conn, 'defineXML',
+ create=True) as mock_define:
+ xmldoc = "<domain/>"
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ mock_dom.blockJobInfo.return_value = {}
+
+ drvr._swap_volume(mock_dom, srcfile, dstfile, 1)
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dstfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
+ mock_dom.blockResize.assert_called_once_with(
+ srcfile, 1 * units.Gi / units.Ki)
+ mock_define.assert_called_once_with(xmldoc)
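+
+ # The expected resize argument converts the 1 GB passed to
+ # _swap_volume into KiB: 1 * units.Gi / units.Ki = 1048576.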
+
+ def test_live_snapshot(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with contextlib.nested(
+ mock.patch.object(drvr._conn, 'defineXML', create=True),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
+ mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
+ mock.patch.object(fake_libvirt_utils, 'chown'),
+ mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
+ ) as (mock_define, mock_size, mock_backing, mock_create_cow,
+ mock_chown, mock_snapshot):
+
+ xmldoc = "<domain/>"
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+ bckfile = "/other/path"
+ dltfile = dstfile + ".delta"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ mock_size.return_value = 1004009
+ mock_backing.return_value = bckfile
+
+ drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2")
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dltfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
+
+ mock_size.assert_called_once_with(srcfile)
+ mock_backing.assert_called_once_with(srcfile, basename=False)
+ mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_chown.assert_called_once_with(dltfile, os.getuid())
+ mock_snapshot.assert_called_once_with(dltfile, "qcow2",
+ dstfile, "qcow2")
+ mock_define.assert_called_once_with(xmldoc)
+
+ @mock.patch.object(greenthread, "spawn")
+ def test_live_migration_hostname_valid(self, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.live_migration(self.context, self.test_instance,
+ "host1.example.com",
+ lambda x: x,
+ lambda x: x)
+ self.assertEqual(1, mock_spawn.call_count)
+
+ @mock.patch.object(greenthread, "spawn")
+ @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
+ def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ mock_hostname.return_value = False
+ self.assertRaises(exception.InvalidHostname,
+ drvr.live_migration,
+ self.context, self.test_instance,
+ "foo/?com=/bin/sh",
+ lambda x: x,
+ lambda x: x)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('tempfile.mkstemp')
+ @mock.patch('os.close', return_value=None)
+ def test_check_instance_shared_storage_local_raw(self,
+ mock_close,
+ mock_mkstemp,
+ mock_exists):
+ instance_uuid = str(uuid.uuid4())
+ self.flags(images_type='raw', group='libvirt')
+ self.flags(instances_path='/tmp')
+ mock_mkstemp.return_value = (-1,
+ '/tmp/{0}/file'.format(instance_uuid))
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(self.context)
+ temp_file = driver.check_instance_shared_storage_local(self.context,
+ instance)
+ self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
+ temp_file['filename'])
+
+ def test_check_instance_shared_storage_local_rbd(self):
+ self.flags(images_type='rbd', group='libvirt')
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertIsNone(driver.
+ check_instance_shared_storage_local(self.context,
+ instance))
+
+
+class HostStateTestCase(test.NoDBTestCase):
+
+ cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
+ '"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
+ '"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
+ '"mtrr", "sep", "apic"], '
+ '"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
+ instance_caps = [(arch.X86_64, "kvm", "hvm"),
+ (arch.I686, "kvm", "hvm")]
+ pci_devices = [{
+ "dev_id": "pci_0000_04_00_3",
+ "address": "0000:04:10.3",
+ "product_id": '1521',
+ "vendor_id": '8086',
+ "dev_type": 'type-PF',
+ "phys_function": None}]
+ numa_topology = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hardware.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+
+ class FakeConnection(libvirt_driver.LibvirtDriver):
+ """Fake connection object."""
+ def __init__(self):
+ super(HostStateTestCase.FakeConnection,
+ self).__init__(fake.FakeVirtAPI(), True)
+
+ def _get_vcpu_total(self):
+ return 1
+
+ def _get_vcpu_used(self):
+ return 0
+
+ def _get_cpu_info(self):
+ return HostStateTestCase.cpu_info
+
+ def _get_disk_over_committed_size_total(self):
+ return 0
+
+ def _get_local_gb_info(self):
+ return {'total': 100, 'used': 20, 'free': 80}
+
+ def _get_memory_mb_total(self):
+ return 497
+
+ def _get_memory_mb_used(self):
+ return 88
+
+ def _get_hypervisor_type(self):
+ return 'QEMU'
+
+ def _get_hypervisor_version(self):
+ return 13091
+
+ def _get_hypervisor_hostname(self):
+ return 'compute1'
+
+ def get_host_uptime(self):
+ return ('10:01:16 up 1:36, 6 users, '
+ 'load average: 0.21, 0.16, 0.19')
+
+ def _get_disk_available_least(self):
+ return 13091
+
+ def _get_instance_capabilities(self):
+ return HostStateTestCase.instance_caps
+
+ def _get_pci_passthrough_devices(self):
+ return jsonutils.dumps(HostStateTestCase.pci_devices)
+
+ def _get_host_numa_topology(self):
+ return HostStateTestCase.numa_topology
+
+ def test_update_status(self):
+ drvr = HostStateTestCase.FakeConnection()
+
+ stats = drvr.get_available_resource("compute1")
+ self.assertEqual(stats["vcpus"], 1)
+ self.assertEqual(stats["memory_mb"], 497)
+ self.assertEqual(stats["local_gb"], 100)
+ self.assertEqual(stats["vcpus_used"], 0)
+ self.assertEqual(stats["memory_mb_used"], 88)
+ self.assertEqual(stats["local_gb_used"], 20)
+ self.assertEqual(stats["hypervisor_type"], 'QEMU')
+ self.assertEqual(stats["hypervisor_version"], 13091)
+ self.assertEqual(stats["hypervisor_hostname"], 'compute1')
+ self.assertEqual(jsonutils.loads(stats["cpu_info"]),
+ {"vendor": "Intel", "model": "pentium",
+ "arch": arch.I686,
+ "features": ["ssse3", "monitor", "pni", "sse2", "sse",
+ "fxsr", "clflush", "pse36", "pat", "cmov",
+ "mca", "pge", "mtrr", "sep", "apic"],
+ "topology": {"cores": "1", "threads": "1", "sockets": "1"}
+ })
+ self.assertEqual(stats["disk_available_least"], 80)
+ self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
+ HostStateTestCase.pci_devices)
+ self.assertThat(hardware.VirtNUMAHostTopology.from_json(
+ stats['numa_topology'])._to_dict(),
+ matchers.DictMatches(
+ HostStateTestCase.numa_topology._to_dict()))
+
+
+class LibvirtDriverTestCase(test.NoDBTestCase):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
+ def setUp(self):
+ super(LibvirtDriverTestCase, self).setUp()
+ self.libvirtconnection = libvirt_driver.LibvirtDriver(
+ fake.FakeVirtAPI(), read_only=True)
+ self.context = context.get_admin_context()
+
+ def _create_instance(self, params=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ sys_meta = {
+ 'instance_type_memory_mb': 512,
+ 'instance_type_swap': 0,
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_root_gb': 1,
+ 'instance_type_id': 2,
+ 'instance_type_name': u'm1.tiny',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': u'1',
+ 'instance_type_vcpus': 1
+ }
+
+ inst = {}
+ inst['id'] = 1
+ inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
+ inst['os_type'] = 'linux'
+ inst['image_ref'] = '1'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type_id'] = 2
+ inst['ami_launch_index'] = 0
+ inst['host'] = 'host1'
+ inst['root_gb'] = 10
+ inst['ephemeral_gb'] = 20
+ inst['config_drive'] = True
+ inst['kernel_id'] = 2
+ inst['ramdisk_id'] = 3
+ inst['key_data'] = 'ABCDEFG'
+ inst['system_metadata'] = sys_meta
+
+ inst.update(params)
+
+ return objects.Instance(**inst)
+
+ def test_migrate_disk_and_power_off_exception(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+ """
+
+ self.counter = 0
+ self.checked_shared_storage = False
+
+ def fake_get_instance_disk_info(instance,
+ block_device_info=None):
+ return '[]'
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ self.counter += 1
+ if self.counter == 1:
+ assert False, "intentional failure"
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_is_storage_shared(dest, inst_base):
+ self.checked_shared_storage = True
+ return False
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
+ fake_is_storage_shared)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ self.assertRaises(AssertionError,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ None, ins_ref, '10.0.0.2', flavor, None)
+
+ def test_migrate_disk_and_power_off(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+ """
+
+ disk_info = [{'type': 'qcow2', 'path': '/test/disk',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/base/disk',
+ 'disk_size': '83886080'},
+ {'type': 'raw', 'path': '/test/disk.local',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/base/disk.local',
+ 'disk_size': '83886080'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+
+ def fake_get_instance_disk_info(instance,
+ block_device_info=None):
+ return disk_info_text
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ # dest is different host case
+ out = self.libvirtconnection.migrate_disk_and_power_off(
+ None, ins_ref, '10.0.0.2', flavor, None)
+ self.assertEqual(out, disk_info_text)
+
+ # dest is same host case
+ out = self.libvirtconnection.migrate_disk_and_power_off(
+ None, ins_ref, '10.0.0.1', flavor, None)
+ self.assertEqual(out, disk_info_text)
+
+ @mock.patch('nova.utils.execute')
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.get_instance_disk_info')
+ def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
+ get_host_ip_addr,
+ mock_destroy,
+ mock_copy_image,
+ mock_execute):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .migrate_disk_and_power_off.
+ """
+ self.copy_or_move_swap_called = False
+
+ # 10G root and 512M swap disk
+ disk_info = [{'disk_size': 1, 'type': 'qcow2',
+ 'virt_disk_size': 10737418240, 'path': '/test/disk',
+ 'backing_file': '/base/disk'},
+ {'disk_size': 1, 'type': 'qcow2',
+ 'virt_disk_size': 536870912, 'path': '/test/disk.swap',
+ 'backing_file': '/base/swap_512'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+ mock_get_disk_info.return_value = disk_info_text
+ get_host_ip_addr.return_value = '10.0.0.1'
+
+ def fake_copy_image(*args, **kwargs):
+ # disk.swap should not be touched since it is skipped over
+ if '/test/disk.swap' in list(args):
+ self.copy_or_move_swap_called = True
+
+ def fake_execute(*args, **kwargs):
+ # disk.swap should not be touched since it is skipped over
+ if set(['mv', '/test/disk.swap']).issubset(list(args)):
+ self.copy_or_move_swap_called = True
+
+ mock_copy_image.side_effect = fake_copy_image
+ mock_execute.side_effect = fake_execute
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Original instance config
+ instance = self._create_instance({'root_gb': 10,
+ 'ephemeral_gb': 0})
+
+ # Resize the fake instance to a 20G root and a 1024M swap disk
+ flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
+
+ # Destination is same host
+ out = conn.migrate_disk_and_power_off(None, instance, '10.0.0.1',
+ flavor, None)
+
+ mock_get_disk_info.assert_called_once_with(instance.name,
+ block_device_info=None)
+ self.assertTrue(get_host_ip_addr.called)
+ mock_destroy.assert_called_once_with(instance)
+ self.assertFalse(self.copy_or_move_swap_called)
+ self.assertEqual(disk_info_text, out)
+
+ def test_migrate_disk_and_power_off_lvm(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
+ .migrate_disk_and_power_off.
+ """
+
+ self.flags(images_type='lvm', group='libvirt')
+ disk_info = [{'type': 'raw', 'path': '/dev/vg/disk',
+ 'disk_size': '83886080'},
+ {'type': 'raw', 'path': '/dev/disk.local',
+ 'disk_size': '83886080'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
+ return disk_info_text
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ # Migration is not implemented for LVM-backed instances
+ self.assertRaises(exception.MigrationPreCheckError,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ None, ins_ref, '10.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_resize_error(self):
+ instance = self._create_instance()
+ flavor = {'root_gb': 5}
+ self.assertRaises(
+ exception.InstanceFaultRollback,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ 'ctx', instance, '10.0.0.1', flavor, None)
+
+ def test_wait_for_running(self):
+ def fake_get_info(instance):
+ if instance['name'] == "not_found":
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+ elif instance['name'] == "running":
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ # instance not found case
+ self.assertRaises(exception.InstanceNotFound,
+ self.libvirtconnection._wait_for_running,
+ {'name': 'not_found',
+ 'uuid': 'not_found_uuid'})
+
+ # instance is running case
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.libvirtconnection._wait_for_running,
+ {'name': 'running',
+ 'uuid': 'running_uuid'})
+
+ # else case
+ self.libvirtconnection._wait_for_running({'name': 'else',
+ 'uuid': 'other_uuid'})
+
+ def test_disk_size_from_instance_disk_info(self):
+ inst = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
+
+ info = {'path': '/path/disk'}
+ self.assertEqual(10 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.local'}
+ self.assertEqual(20 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.swap'}
+ self.assertEqual(0,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ @mock.patch('nova.utils.execute')
+ def test_disk_raw_to_qcow2(self, mock_execute):
+ path = '/test/disk'
+ _path_qcow = path + '_qcow'
+
+ self.libvirtconnection._disk_raw_to_qcow2(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'raw',
+ '-O', 'qcow2', path, _path_qcow),
+ mock.call('mv', _path_qcow, path)])
+
+ @mock.patch('nova.utils.execute')
+ def test_disk_qcow2_to_raw(self, mock_execute):
+ path = '/test/disk'
+ _path_raw = path + '_raw'
+
+ self.libvirtconnection._disk_qcow2_to_raw(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'qcow2',
+ '-O', 'raw', path, _path_raw),
+ mock.call('mv', _path_raw, path)])
+
+ @mock.patch('nova.virt.disk.api.extend')
+ def test_disk_resize_raw(self, mock_extend):
+ info = {'type': 'raw', 'path': '/test/disk'}
+
+ self.libvirtconnection._disk_resize(info, 50)
+ mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
+
+ @mock.patch('nova.virt.disk.api.can_resize_image')
+ @mock.patch('nova.virt.disk.api.is_image_partitionless')
+ @mock.patch('nova.virt.disk.api.extend')
+ def test_disk_resize_qcow2(
+ self, mock_extend, mock_can_resize, mock_is_partitionless):
+ info = {'type': 'qcow2', 'path': '/test/disk'}
+
+ with contextlib.nested(
+ mock.patch.object(
+ self.libvirtconnection, '_disk_qcow2_to_raw'),
+ mock.patch.object(
+ self.libvirtconnection, '_disk_raw_to_qcow2'))\
+ as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
+
+ mock_can_resize.return_value = True
+ mock_is_partitionless.return_value = True
+
+ self.libvirtconnection._disk_resize(info, 50)
+
+ mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
+ mock_extend.assert_called_once_with(
+ info['path'], 50, use_cow=False)
+ mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .finish_migration.
+ """
+
+ disk_info = [{'type': 'qcow2', 'path': '/test/disk',
+ 'local_gb': 10, 'backing_file': '/base/disk'},
+ {'type': 'raw', 'path': '/test/disk.local',
+ 'local_gb': 10, 'backing_file': '/base/disk.local'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+ powered_on = power_on
+ self.fake_create_domain_called = False
+ self.fake_disk_resize_called = False
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None, write_to_disk=False):
+ return ""
+
+ def fake_plug_vifs(instance, network_info):
+ pass
+
+ def fake_create_image(context, inst,
+ disk_mapping, suffix='',
+ disk_images=None, network_info=None,
+ block_device_info=None, inject_files=True):
+ self.assertFalse(inject_files)
+
+ def fake_create_domain_and_network(
+ context, xml, instance, network_info,
+ block_device_info=None, power_on=True, reboot=False,
+ vifs_already_plugged=False):
+ self.fake_create_domain_called = True
+ self.assertEqual(powered_on, power_on)
+ self.assertTrue(vifs_already_plugged)
+
+ def fake_enable_hairpin(instance):
+ pass
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_get_info(instance):
+ if powered_on:
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ def fake_disk_resize(info, size):
+ self.fake_disk_resize_called = True
+
+ self.flags(use_cow_images=True)
+ self.stubs.Set(self.libvirtconnection, '_disk_resize',
+ fake_disk_resize)
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
+ self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(self.libvirtconnection, '_create_image',
+ fake_create_image)
+ self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
+ fake_create_domain_and_network)
+ self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
+ fake_enable_hairpin)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ fw = base_firewall.NoopFirewallDriver()
+ self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ ins_ref = self._create_instance()
+
+ self.libvirtconnection.finish_migration(
+ context.get_admin_context(), None, ins_ref,
+ disk_info_text, [], None,
+ resize_instance, None, power_on)
+ self.assertTrue(self.fake_create_domain_called)
+ self.assertEqual(
+ resize_instance, self.fake_disk_resize_called)
+
+ def test_finish_migration_resize(self):
+ self._test_finish_migration(True, resize_instance=True)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
+ def _test_finish_revert_migration(self, power_on):
+ """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
+ .finish_revert_migration.
+ """
+ powered_on = power_on
+ self.fake_create_domain_called = False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_plug_vifs(instance, network_info):
+ pass
+
+ def fake_create_domain(xml, instance=None, launch_flags=0,
+ power_on=True):
+ self.fake_create_domain_called = True
+ self.assertEqual(powered_on, power_on)
+ return mock.MagicMock()
+
+ def fake_enable_hairpin(instance):
+ pass
+
+ def fake_get_info(instance):
+ if powered_on:
+ return {'state': power_state.RUNNING}
+ else:
+ return {'state': power_state.SHUTDOWN}
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None):
+ return ""
+
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
+ self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ fw = base_firewall.NoopFirewallDriver()
+ self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
+ self.stubs.Set(self.libvirtconnection, '_create_domain',
+ fake_create_domain)
+ self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
+ fake_enable_hairpin)
+ self.stubs.Set(self.libvirtconnection, 'get_info',
+ fake_get_info)
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ ins_ref = self._create_instance()
+ os.mkdir(os.path.join(tmpdir, ins_ref['name']))
+ libvirt_xml_path = os.path.join(tmpdir,
+ ins_ref['name'],
+ 'libvirt.xml')
+ f = open(libvirt_xml_path, 'w')
+ f.close()
+
+ self.libvirtconnection.finish_revert_migration(
+ context.get_admin_context(), ins_ref,
+ [], None, power_on)
+ self.assertTrue(self.fake_create_domain_called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def _test_finish_revert_migration_after_crash(self, backup_made=True,
+ del_inst_failed=False):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(shutil, 'rmtree')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml',
+ lambda *a, **k: None)
+ self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
+ lambda *a: None)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+
+ libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
+ os.path.exists('/fake/foo_resize').AndReturn(backup_made)
+ if backup_made:
+ if del_inst_failed:
+ os_error = OSError(errno.ENOENT, 'No such file or directory')
+ shutil.rmtree('/fake/foo').AndRaise(os_error)
+ else:
+ shutil.rmtree('/fake/foo')
+ utils.execute('mv', '/fake/foo_resize', '/fake/foo')
+
+ self.mox.ReplayAll()
+
+ self.libvirtconnection.finish_revert_migration(context, {}, [])
+
+ def test_finish_revert_migration_after_crash(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_new(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_backup(self):
+ self._test_finish_revert_migration_after_crash(backup_made=False)
+
+ def test_finish_revert_migration_after_crash_delete_failed(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True,
+ del_inst_failed=True)
+
+ def test_cleanup_failed_migration(self):
+ self.mox.StubOutWithMock(shutil, 'rmtree')
+ shutil.rmtree('/fake/inst')
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_failed_migration('/fake/inst')
+
+ def test_confirm_migration(self):
+ ins_ref = self._create_instance()
+
+ self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ self.mox.ReplayAll()
+ self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_cleanup_resize_same_host(self):
+ CONF.set_override('policy_dirs', [])
+ ins_ref = self._create_instance({'host': CONF.host})
+
+ def fake_os_path_exists(path):
+ return True
+
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref,
+ forceold=True).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
+
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_cleanup_resize_not_same_host(self):
+ CONF.set_override('policy_dirs', [])
+ host = 'not' + CONF.host
+ ins_ref = self._create_instance({'host': host})
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_undefine_domain(instance):
+ pass
+
+ def fake_unplug_vifs(instance, network_info, ignore_errors=False):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stubs.Set(self.libvirtconnection, '_undefine_domain',
+ fake_undefine_domain)
+ self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
+ fake_unplug_vifs)
+ self.stubs.Set(self.libvirtconnection.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref,
+ forceold=True).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
+
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_get_instance_disk_info_exception(self):
+ instance_name = "fake-instance-name"
+
+ class FakeExceptionDomain(FakeVirtDomain):
+ def __init__(self):
+ super(FakeExceptionDomain, self).__init__()
+
+ def XMLDesc(self, *args):
+ raise libvirt.libvirtError("Libvirt error")
+
+ def fake_lookup_by_name(instance_name):
+ return FakeExceptionDomain()
+
+ self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
+ fake_lookup_by_name)
+ self.assertRaises(exception.InstanceNotFound,
+ self.libvirtconnection.get_instance_disk_info,
+ instance_name)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.lvm.list_volumes')
+ def test_lvm_disks(self, listlvs, exists):
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.flags(images_volume_group='vols', group='libvirt')
+ exists.return_value = True
+ listlvs.return_value = ['fake-uuid_foo',
+ 'other-uuid_foo']
+ disks = self.libvirtconnection._lvm_disks(instance)
+ self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
+
+ def test_is_booted_from_volume(self):
+ func = libvirt_driver.LibvirtDriver._is_booted_from_volume
+ instance, disk_mapping = {}, {}
+
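+ # An instance with no image_ref is treated as booted from a volume;
+ # adding an image_ref (with a root disk mapped) makes it image-backed.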
+ self.assertTrue(func(instance, disk_mapping))
+ disk_mapping['disk'] = 'map'
+ self.assertTrue(func(instance, disk_mapping))
+
+ instance['image_ref'] = 'uuid'
+ self.assertFalse(func(instance, disk_mapping))
+
+ @mock.patch('nova.virt.netutils.get_injected_network_template')
+ @mock.patch('nova.virt.disk.api.inject_data')
+ def _test_inject_data(self, driver_params, disk_params,
+ disk_inject_data, inj_network,
+ called=True):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ class ImageBackend(object):
+ path = '/path'
+
+ def check_image_exists(self):
+ if self.path == '/fail/path':
+ return False
+ return True
+
+ def fake_inj_network(*args, **kwds):
+ return args[0] or None
+ inj_network.side_effect = fake_inj_network
+
+ image_backend = ImageBackend()
+ image_backend.path = disk_params[0]
+
+ with mock.patch.object(
+ conn.image_backend,
+ 'image',
+ return_value=image_backend):
+ self.flags(inject_partition=0, group='libvirt')
+
+ conn._inject_data(**driver_params)
+
+ if called:
+ disk_inject_data.assert_called_once_with(
+ *disk_params,
+ partition=None, mandatory=('files',), use_cow=True)
+
+ self.assertEqual(disk_inject_data.called, called)
+
+ def _test_inject_data_default_driver_params(self):
+ return {
+ 'instance': {
+ 'uuid': 'fake-uuid',
+ 'id': 1,
+ 'kernel_id': None,
+ 'image_ref': 1,
+ 'key_data': None,
+ 'metadata': None
+ },
+ 'network_info': None,
+ 'admin_pass': None,
+ 'files': None,
+ 'suffix': ''
+ }
+
+ def test_inject_data_adminpass(self):
+ self.flags(inject_password=True, group='libvirt')
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['admin_pass'] = 'foobar'
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ None, # metadata
+ 'foobar', # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ # Test with the configuration set to False.
+ self.flags(inject_password=False, group='libvirt')
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def test_inject_data_key(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['instance']['key_data'] = 'key-content'
+
+ self.flags(inject_key=True, group='libvirt')
+ disk_params = [
+ '/path', # injection_path
+ 'key-content', # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ # Test with the configuration set to False.
+ self.flags(inject_key=False, group='libvirt')
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def test_inject_data_metadata(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['instance']['metadata'] = 'data'
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ 'data', # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_data_files(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['files'] = ['file1', 'file2']
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ ['file1', 'file2'], # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_data_net(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['network_info'] = {'net': 'eno1'}
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ {'net': 'eno1'}, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_not_exist_image(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ disk_params = [
+ '/fail/path', # injection_path
+ 'key-content', # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def _test_attach_detach_interface(self, method, power_state,
+ expected_flags):
+ instance = self._create_instance()
+ network_info = _fake_network_info(self.stubs, 1)
+ domain = FakeVirtDomain()
+ self.mox.StubOutWithMock(self.libvirtconnection, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.libvirtconnection.firewall_driver,
+ 'setup_basic_filtering')
+ self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
+ self.mox.StubOutWithMock(domain, 'info')
+ self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
+
+ self.libvirtconnection._lookup_by_name(
+ 'instance-00000001').AndReturn(domain)
+ if method == 'attach_interface':
+ self.libvirtconnection.firewall_driver.setup_basic_filtering(
+ instance, [network_info[0]])
+
+ fake_flavor = instance.get_flavor()
+
+ objects.Flavor.get_by_id(mox.IgnoreArg(), 2).AndReturn(fake_flavor)
+
+ if method == 'attach_interface':
+ fake_image_meta = {'id': instance['image_ref']}
+ elif method == 'detach_interface':
+ fake_image_meta = None
+ expected = self.libvirtconnection.vif_driver.get_config(
+ instance, network_info[0], fake_image_meta, fake_flavor,
+ CONF.libvirt.virt_type)
+
+ self.mox.StubOutWithMock(self.libvirtconnection.vif_driver,
+ 'get_config')
+ self.libvirtconnection.vif_driver.get_config(
+ instance, network_info[0],
+ fake_image_meta,
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).AndReturn(expected)
+ domain.info().AndReturn([power_state])
+ if method == 'attach_interface':
+ domain.attachDeviceFlags(expected.to_xml(), expected_flags)
+ elif method == 'detach_interface':
+ domain.detachDeviceFlags(expected.to_xml(), expected_flags)
+
+ self.mox.ReplayAll()
+ if method == 'attach_interface':
+ self.libvirtconnection.attach_interface(
+ instance, fake_image_meta, network_info[0])
+ elif method == 'detach_interface':
+ self.libvirtconnection.detach_interface(
+ instance, network_info[0])
+ self.mox.VerifyAll()
+
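+ # For running and paused domains the device change must hit both the
+ # persistent config and the live domain; a shut-down domain only needs
+ # the persistent config flag.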
+ def test_attach_interface_with_running_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.RUNNING,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_attach_interface_with_pause_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.PAUSED,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_attach_interface_with_shutdown_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.SHUTDOWN,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
+ def test_detach_interface_with_running_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.RUNNING,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_detach_interface_with_pause_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.PAUSED,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_detach_interface_with_shutdown_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.SHUTDOWN,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
+ def test_rescue(self):
+ instance = self._create_instance({'config_drive': None})
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance,
+ network_info, image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ def test_rescue_config_drive(self):
+ instance = self._create_instance()
+ uuid = instance.uuid
+ configdrive_path = uuid + '/disk.config.rescue'
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ '__init__')
+ self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
+ content=mox.IgnoreArg(),
+ extra_md=mox.IgnoreArg(),
+ network_info=mox.IgnoreArg())
+ cdb = self.mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.Regex(configdrive_path))
+ cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()
+ ).AndReturn(None)
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance, network_info,
+ image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
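+ # Only one of the successive os.path.exists probes reports a directory,
+ # which lets the rmtree('/path_del') cleanup run and succeed.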
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resize(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
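+ # The first 'mv' of the instance path raises; the subsequent move of the
+ # '_resize' copy succeeds, so deletion is still reported as successful.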
+ nova.utils.execute.side_effect = [Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ exists.side_effect = [False, False, True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resume(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_none(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, False, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertEqual(0, len(shutil.mock_calls))
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_concurrent(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
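+ # Both initial rename attempts fail; the retried rename of the original
+ # path then succeeds, so the cleanup still completes.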
+ nova.utils.execute.side_effect = [Exception(), Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ expected.append(expected[0])
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ def _assert_on_id_map(self, idmap, klass, start, target, count):
+ self.assertIsInstance(idmap, klass)
+ self.assertEqual(start, idmap.start)
+ self.assertEqual(target, idmap.target)
+ self.assertEqual(count, idmap.count)
+
+ def test_get_id_maps(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.virt_type = "lxc"
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(len(idmaps), 4)
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 1, 20000, 10)
+ self._assert_on_id_map(idmaps[2],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[3],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 1, 20000, 10)
+
+ def test_get_id_maps_not_lxc(self):
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(0, len(idmaps))
+
+ def test_get_id_maps_only_uid(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = []
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(2, len(idmaps))
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 1, 20000, 10)
+
+ def test_get_id_maps_only_gid(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.uid_maps = []
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(2, len(idmaps))
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 1, 20000, 10)
+
+ def test_instance_on_disk(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.assertFalse(conn.instance_on_disk(instance))
+
+ def test_instance_on_disk_rbd(self):
+ self.flags(images_type='rbd', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.assertTrue(conn.instance_on_disk(instance))
+
+ @mock.patch("nova.objects.Flavor.get_by_id")
+ @mock.patch("nova.compute.utils.get_image_metadata")
+ def test_prepare_args_for_get_config(self, mock_image, mock_get):
+ instance = self._create_instance()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ def fake_get_by_id(context, id):
+ self.assertEqual('yes', context.read_deleted)
+
+ mock_get.side_effect = fake_get_by_id
+
+ conn._prepare_args_for_get_config(self.context, instance)
+
+ mock_get.assert_called_once_with(self.context,
+ instance['instance_type_id'])
+
+
+class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
+ """Test for LibvirtDriver.get_all_volume_usage."""
+
+ def setUp(self):
+ super(LibvirtVolumeUsageTestCase, self).setUp()
+ self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.c = context.get_admin_context()
+
+ self.ins_ref = objects.Instance(
+ id=1729,
+ uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
+ )
+
+ # also verify the device path of a bootable volume
+ self.bdms = [{'volume_id': 1,
+ 'device_name': '/dev/vde'},
+ {'volume_id': 2,
+ 'device_name': 'vda'}]
+
+ def test_get_all_volume_usage(self):
+ def fake_block_stats(instance_name, disk):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+
+ expected_usage = [{'volume': 1,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L},
+ {'volume': 2,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L}]
+ self.assertEqual(vol_usage, expected_usage)
+
+ def test_get_all_volume_usage_device_not_found(self):
+ def fake_lookup(instance_name):
+ raise libvirt.libvirtError('invalid path')
+
+ self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+ self.assertEqual(vol_usage, [])
+
+
+class LibvirtNonblockingTestCase(test.NoDBTestCase):
+ """Test libvirtd calls are nonblocking."""
+
+ def setUp(self):
+ super(LibvirtNonblockingTestCase, self).setUp()
+ self.flags(connection_uri="test:///default",
+ group='libvirt')
+
+ def test_connection_to_primitive(self):
+ # Test bug 962840.
+ import nova.virt.libvirt.driver as libvirt_driver
+ connection = libvirt_driver.LibvirtDriver('')
+ connection.set_host_enabled = mock.Mock()
+ jsonutils.to_primitive(connection._conn, convert_instances=True)
+
+ def test_tpool_execute_calls_libvirt(self):
+ conn = libvirt.virConnect()
+ conn.is_expected = True
+
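+ # Every libvirt call is expected to be dispatched through the eventlet
+ # tpool so it cannot block the hub.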
+ self.mox.StubOutWithMock(eventlet.tpool, 'execute')
+ eventlet.tpool.execute(
+ libvirt.openAuth,
+ 'test:///default',
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(conn)
+ eventlet.tpool.execute(
+ conn.domainEventRegisterAny,
+ None,
+ libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ if hasattr(libvirt.virConnect, 'registerCloseCallback'):
+ eventlet.tpool.execute(
+ conn.registerCloseCallback,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ c = driver._get_connection()
+ self.assertEqual(True, c.is_expected)
+
+
+class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
+ """Tests for libvirtDriver.volume_snapshot_create/delete."""
+
+ def setUp(self):
+ super(LibvirtVolumeSnapshotTestCase, self).setUp()
+
+ self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.c = context.get_admin_context()
+
+ self.flags(instance_name_template='instance-%s')
+ self.flags(qemu_allowed_storage_drivers=[], group='libvirt')
+
+ # creating instance
+ self.inst = {}
+ self.inst['uuid'] = uuidutils.generate_uuid()
+ self.inst['id'] = '1'
+
+ # create domain info
+ self.dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='disk1_file'/>
+ <target dev='vda' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ <target dev='vdb' bus='virtio' serial='1234'/>
+ </disk>
+ </devices>
+ </domain>"""
+
+ # alternate domain info with network-backed snapshot chain
+ self.dom_netdisk_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='disk1_file'/>
+ <target dev='vda' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
+ </disk>
+ <disk type='network' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source protocol='gluster' name='vol1/root.img'>
+ <host name='server1' port='24007'/>
+ </source>
+ <backingStore type='network' index='1'>
+ <driver name='qemu' type='qcow2'/>
+ <source protocol='gluster' name='vol1/snap.img'>
+ <host name='server1' port='24007'/>
+ </source>
+ <backingStore type='network' index='2'>
+ <driver name='qemu' type='qcow2'/>
+ <source protocol='gluster' name='vol1/snap-b.img'>
+ <host name='server1' port='24007'/>
+ </source>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ <target dev='vdb' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
+ </disk>
+ </devices>
+ </domain>
+ """
+
+ self.create_info = {'type': 'qcow2',
+ 'snapshot_id': '1234-5678',
+ 'new_file': 'new-file'}
+
+ self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
+ self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
+
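+ # delete_info without a merge target exercises the blockRebase path
+ # (newest snapshot); a named merge target exercises blockCommit.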
+ self.delete_info_1 = {'type': 'qcow2',
+ 'file_to_merge': 'snap.img',
+ 'merge_target_file': None}
+
+ self.delete_info_2 = {'type': 'qcow2',
+ 'file_to_merge': 'snap.img',
+ 'merge_target_file': 'other-snap.img'}
+
+ self.delete_info_netdisk = {'type': 'qcow2',
+ 'file_to_merge': 'snap.img',
+ 'merge_target_file': 'root.img'}
+
+ self.delete_info_invalid_type = {'type': 'made_up_type',
+ 'file_to_merge': 'some_file',
+ 'merge_target_file':
+ 'some_other_file'}
+
+ def tearDown(self):
+ super(LibvirtVolumeSnapshotTestCase, self).tearDown()
+
+ @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
+ 'refresh_connection_info')
+ @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
+ 'get_by_volume_id')
+ def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
+ mock_refresh_connection_info):
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 123,
+ 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': '{"fake": "connection_info"}'})
+ mock_get_by_volume_id.return_value = fake_bdm
+
+ self.conn._volume_refresh_connection_info(self.c, self.inst,
+ self.volume_uuid)
+
+ mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
+ mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
+ self.conn._volume_api, self.conn)
+
+ def test_volume_snapshot_create(self, quiesce=True):
+ """Test snapshot creation with file-based disk."""
+ self.flags(instance_name_template='instance-%s')
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+
+ instance = objects.Instance(**self.inst)
+
+ new_file = 'new-file'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ snap_xml_src = (
+ '<domainsnapshot>\n'
+ ' <disks>\n'
+ ' <disk name="disk1_file" snapshot="external" type="file">\n'
+ ' <source file="new-file"/>\n'
+ ' </disk>\n'
+ ' <disk name="vdb" snapshot="no"/>\n'
+ ' </disks>\n'
+ '</domainsnapshot>\n')
+
+ # Older versions of libvirt may be missing these.
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+
+ snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
+
+ snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
+
+ if quiesce:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
+ else:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
+ AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
+ domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_create(self.c, instance, domain,
+ self.volume_uuid, new_file)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_create_libgfapi(self, quiesce=True):
+ """Test snapshot creation with libgfapi network disk."""
+ self.flags(instance_name_template='instance-%s')
+ self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+
+ self.dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='disk1_file'/>
+ <target dev='vda' bus='virtio'/>
+ <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
+ </disk>
+ <disk type='block'>
+ <source protocol='gluster' name='gluster1/volume-1234'>
+ <host name='127.3.4.5' port='24007'/>
+ </source>
+ <target dev='vdb' bus='virtio' serial='1234'/>
+ </disk>
+ </devices>
+ </domain>"""
+
+ instance = objects.Instance(**self.inst)
+
+ new_file = 'new-file'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ snap_xml_src = (
+ '<domainsnapshot>\n'
+ ' <disks>\n'
+ ' <disk name="disk1_file" snapshot="external" type="file">\n'
+ ' <source file="new-file"/>\n'
+ ' </disk>\n'
+ ' <disk name="vdb" snapshot="no"/>\n'
+ ' </disks>\n'
+ '</domainsnapshot>\n')
+
+ # Older versions of libvirt may be missing these.
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+
+ snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
+
+ snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
+
+ if quiesce:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
+ else:
+ domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
+ AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
+ domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_create(self.c, instance, domain,
+ self.volume_uuid, new_file)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_create_noquiesce(self):
+ self.test_volume_snapshot_create(quiesce=False)
+
+ def test_volume_snapshot_create_outer_success(self):
+ instance = objects.Instance(**self.inst)
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
+
+ self.conn._lookup_by_name('instance-1').AndReturn(domain)
+
+ self.conn._volume_snapshot_create(self.c,
+ instance,
+ domain,
+ self.volume_uuid,
+ self.create_info['new_file'])
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.create_info['snapshot_id'], 'creating')
+
+ self.mox.StubOutWithMock(self.conn._volume_api, 'get_snapshot')
+ self.conn._volume_api.get_snapshot(self.c,
+ self.create_info['snapshot_id']).AndReturn({'status': 'available'})
+ self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
+ self.conn._volume_refresh_connection_info(self.c, instance,
+ self.volume_uuid)
+
+ self.mox.ReplayAll()
+
+ self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
+ self.create_info)
+
+ def test_volume_snapshot_create_outer_failure(self):
+ instance = objects.Instance(**self.inst)
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
+
+ self.conn._lookup_by_name('instance-1').AndReturn(domain)
+
+ self.conn._volume_snapshot_create(self.c,
+ instance,
+ domain,
+ self.volume_uuid,
+ self.create_info['new_file']).\
+ AndRaise(exception.NovaException('oops'))
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.create_info['snapshot_id'], 'error')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_create,
+ self.c,
+ instance,
+ self.volume_uuid,
+ self.create_info)
+
+ def test_volume_snapshot_delete_1(self):
+ """Deleting newest snapshot -- blockRebase."""
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockRebase('vda', 'snap.img', 0, 0)
+
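+ # The driver polls blockJobInfo() until cur == end, i.e. the rebase
+ # has finished.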
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_2(self):
+ """Deleting older snapshot -- blockCommit."""
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeVirtDomain(fake_xml=self.dom_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)
+
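+ # An empty blockJobInfo() result means the block job is gone, which the
+ # driver treats as completion.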
+ domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vda', 0).AndReturn({})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_2)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_outer_success(self):
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
+
+ self.conn._volume_snapshot_delete(self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ delete_info=self.delete_info_1)
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, snapshot_id, 'deleting')
+
+ self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
+ self.conn._volume_refresh_connection_info(self.c, instance,
+ self.volume_uuid)
+
+ self.mox.ReplayAll()
+
+ self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id,
+ self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_outer_failure(self):
+ instance = objects.Instance(**self.inst)
+ snapshot_id = '1234-9876'
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
+
+ self.conn._volume_snapshot_delete(self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ delete_info=self.delete_info_1).\
+ AndRaise(exception.NovaException('oops'))
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, snapshot_id, 'error_deleting')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_delete,
+ self.c,
+ instance,
+ self.volume_uuid,
+ snapshot_id,
+ self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_invalid_type(self):
+ instance = objects.Instance(**self.inst)
+
+ FakeVirtDomain(fake_xml=self.dom_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_volume_api')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ self.conn._volume_api.update_snapshot_status(
+ self.c, self.snapshot_id, 'error_deleting')
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.conn.volume_snapshot_delete,
+ self.c,
+ instance,
+ self.volume_uuid,
+ self.snapshot_id,
+ self.delete_info_invalid_type)
+
+ def test_volume_snapshot_delete_netdisk_1(self):
+ """Delete newest snapshot -- blockRebase for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
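+ # Network disks are addressed with the relative backing-chain notation
+ # ('vdb[1]') rather than a local file path.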
+ domain.blockRebase('vdb', 'vdb[1]', 0, 0)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_netdisk_2(self):
+ """Delete older snapshot -- blockCommit for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = objects.Instance(**self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
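+ # VIR_DOMAIN_BLOCK_COMMIT_RELATIVE keeps the backing file references in
+ # the chain relative after the commit.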
+ domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
+ fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id,
+ self.delete_info_netdisk)
+
+ self.mox.VerifyAll()
diff --git a/nova/tests/unit/virt/libvirt/test_fakelibvirt.py b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
new file mode 100644
index 0000000000..7a6d020426
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
@@ -0,0 +1,386 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+
+from lxml import etree
+
+from nova.compute import arch
+import nova.tests.unit.virt.libvirt.fakelibvirt as libvirt
+
+
+def get_vm_xml(name="testname", uuid=None, source_type='file',
+ interface_type='bridge'):
+ uuid_tag = ''
+ if uuid:
+ uuid_tag = '<uuid>%s</uuid>' % (uuid,)
+
+ return '''<domain type='kvm'>
+ <name>%(name)s</name>
+%(uuid_tag)s
+ <memory>128000</memory>
+ <vcpu>1</vcpu>
+ <os>
+ <type>hvm</type>
+ <kernel>/somekernel</kernel>
+ <cmdline>root=/dev/sda</cmdline>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source %(source_type)s='/somefile'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='%(interface_type)s'>
+ <mac address='05:26:3e:31:28:1f'/>
+ <source %(interface_type)s='br100'/>
+ </interface>
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/>
+ <graphics type='spice' port='5901' autoport='yes' keymap='en-us'/>
+ </devices>
+</domain>''' % {'name': name,
+ 'uuid_tag': uuid_tag,
+ 'source_type': source_type,
+ 'interface_type': interface_type}
+
+
+class FakeLibvirtTests(test.NoDBTestCase):
+ def tearDown(self):
+ super(FakeLibvirtTests, self).tearDown()
+ libvirt._reset()
+
+ def get_openAuth_curry_func(self, readOnly=False):
+ def fake_cb(credlist):
+ return 0
+
+ creds = [[libvirt.VIR_CRED_AUTHNAME,
+ libvirt.VIR_CRED_NOECHOPROMPT],
+ fake_cb,
+ None]
+ flags = 0
+ if readOnly:
+ flags = libvirt.VIR_CONNECT_RO
+ return lambda uri: libvirt.openAuth(uri, creds, flags)
+
+ def test_openAuth_accepts_None_uri_by_default(self):
+ conn_method = self.get_openAuth_curry_func()
+ conn = conn_method(None)
+ self.assertNotEqual(conn, None, "Connecting to fake libvirt failed")
+
+ def test_openAuth_can_refuse_None_uri(self):
+ conn_method = self.get_openAuth_curry_func()
+ libvirt.allow_default_uri_connection = False
+ self.addCleanup(libvirt._reset)
+ self.assertRaises(ValueError, conn_method, None)
+
+ def test_openAuth_refuses_invalid_URI(self):
+ conn_method = self.get_openAuth_curry_func()
+ self.assertRaises(libvirt.libvirtError, conn_method, 'blah')
+
+ def test_getInfo(self):
+ conn_method = self.get_openAuth_curry_func(readOnly=True)
+ res = conn_method(None).getInfo()
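+ # getInfo() returns (arch, memory MB, active CPUs, CPU MHz, NUMA nodes,
+ # sockets, cores per socket, threads per core).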
+ self.assertIn(res[0], (arch.I686, arch.X86_64))
+ self.assertTrue(1024 <= res[1] <= 16384,
+ "Memory unusually high or low.")
+ self.assertTrue(1 <= res[2] <= 32,
+ "Active CPU count unusually high or low.")
+ self.assertTrue(800 <= res[3] <= 4500,
+ "CPU speed unusually high or low.")
+ self.assertTrue(res[2] <= (res[5] * res[6]),
+ "More active CPUs than num_sockets*cores_per_socket")
+
+ def test_createXML_detects_invalid_xml(self):
+ self._test_XML_func_detects_invalid_xml('createXML', [0])
+
+ def test_defineXML_detects_invalid_xml(self):
+ self._test_XML_func_detects_invalid_xml('defineXML', [])
+
+ def _test_XML_func_detects_invalid_xml(self, xmlfunc_name, args):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ try:
+ getattr(conn, xmlfunc_name)("this is not valid </xml>", *args)
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_XML_DETAIL)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_DOMAIN)
+ return
+ raise self.failureException("Invalid XML didn't raise libvirtError")
+
+ def test_defineXML_defines_domain(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertEqual('testname', dom.name())
+ self.assertEqual(0, dom.isActive())
+ dom.undefine()
+ self.assertRaises(libvirt.libvirtError,
+ conn.lookupByName,
+ 'testname')
+
+ def test_blockStats(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ blockstats = dom.blockStats('vda')
+ self.assertEqual(len(blockstats), 5)
+ for x in blockstats:
+ self.assertIn(type(x), [int, long])
+
+ def test_attach_detach(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ xml = '''<disk type='block'>
+ <driver name='qemu' type='raw'/>
+ <source dev='/dev/nbd0'/>
+ <target dev='/dev/vdc' bus='virtio'/>
+ </disk>'''
+ self.assertTrue(dom.attachDevice(xml))
+ self.assertTrue(dom.detachDevice(xml))
+
+ def test_info(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ info = dom.info()
+ self.assertEqual(info[0], libvirt.VIR_DOMAIN_RUNNING)
+ self.assertEqual(info[1], 128000)
+ self.assertTrue(info[2] <= 128000)
+ self.assertEqual(info[3], 1)
+ self.assertIn(type(info[4]), [int, long])
+
+ def test_createXML_runs_domain(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.createXML(get_vm_xml(), 0)
+ dom = conn.lookupByName('testname')
+ self.assertEqual('testname', dom.name())
+ self.assertEqual(1, dom.isActive())
+ dom.destroy()
+ try:
+ dom = conn.lookupByName('testname')
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
+ return
+ self.fail("lookupByName succeeded for destroyed non-defined VM")
+
+ def test_defineXML_remembers_uuid(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ uuid = 'b21f957d-a72f-4b93-b5a5-45b1161abb02'
+ conn.defineXML(get_vm_xml(uuid=uuid))
+ dom = conn.lookupByName('testname')
+ self.assertEqual(dom.UUIDString(), uuid)
+
+ def test_createWithFlags(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertFalse(dom.isActive(), 'Defined domain was running.')
+ dom.createWithFlags(0)
+ self.assertTrue(dom.isActive(),
+ 'Domain wasn\'t running after createWithFlags')
+
+ def test_managedSave(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ self.assertFalse(dom.isActive(), 'Defined domain was running.')
+ dom.createWithFlags(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 0)
+ dom.managedSave(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 1)
+ dom.managedSaveRemove(0)
+ self.assertEqual(dom.hasManagedSaveImage(0), 0)
+
+ def test_listDomainsId_and_lookupById(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ dom.createWithFlags(0)
+ self.assertEqual(len(conn.listDomainsID()), 1)
+
+ dom_id = conn.listDomainsID()[0]
+ self.assertEqual(conn.lookupByID(dom_id), dom)
+
+ dom_id = conn.listDomainsID()[0]
+ try:
+ conn.lookupByID(dom_id + 1)
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
+ return
+ raise self.failureException("Looking up an invalid domain ID didn't "
+ "raise libvirtError")
+
+ def test_define_and_retrieve(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml())
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ etree.fromstring(xml)
+
+ def _test_accepts_source_type(self, source_type):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml(source_type=source_type))
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ tree = etree.fromstring(xml)
+ elem = tree.find('./devices/disk/source')
+ self.assertEqual(elem.get('file'), '/somefile')
+
+ def test_accepts_source_dev(self):
+ self._test_accepts_source_type('dev')
+
+ def test_accepts_source_path(self):
+ self._test_accepts_source_type('path')
+
+ def test_network_type_bridge_sticks(self):
+ self._test_network_type_sticks('bridge')
+
+ def test_network_type_network_sticks(self):
+ self._test_network_type_sticks('network')
+
+ def _test_network_type_sticks(self, network_type):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.listDomainsID(), [])
+ conn.defineXML(get_vm_xml(interface_type=network_type))
+ dom = conn.lookupByName('testname')
+ xml = dom.XMLDesc(0)
+ tree = etree.fromstring(xml)
+ elem = tree.find('./devices/interface')
+ self.assertEqual(elem.get('type'), network_type)
+ elem = elem.find('./source')
+ self.assertEqual(elem.get(network_type), 'br100')
+
+ def test_getType(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertEqual(conn.getType(), 'QEMU')
+
+ def test_getVersion(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ self.assertIsInstance(conn.getVersion(), int)
+
+ def test_getCapabilities(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ etree.fromstring(conn.getCapabilities())
+
+ def test_nwfilter_define_undefine(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+ # Will raise an exception if it's not valid XML
+ xml = '''<filter name='nova-instance-instance-789' chain='root'>
+ <uuid>946878c6-3ad3-82b2-87f3-c709f3807f58</uuid>
+ </filter>'''
+
+ conn.nwfilterDefineXML(xml)
+ nwfilter = conn.nwfilterLookupByName('nova-instance-instance-789')
+ nwfilter.undefine()
+ try:
+ conn.nwfilterLookupByName('nova-instance-instance-789320334')
+ except libvirt.libvirtError as e:
+ self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_NWFILTER)
+ self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_NWFILTER)
+ return
+ raise self.failureException("Invalid NWFilter name didn't"
+ " raise libvirtError")
+
+ def test_compareCPU_compatible(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_model,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_IDENTICAL)
+
+ def test_compareCPU_incompatible_vendor(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_model,
+ "AnotherVendor",
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_incompatible_arch(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % ('not-a-valid-arch',
+ libvirt.node_cpu_model,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_incompatible_model(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <model>%s</model>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ "AnotherModel",
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
+
+ def test_compareCPU_compatible_unspecified_model(self):
+ conn = self.get_openAuth_curry_func()('qemu:///system')
+
+ xml = '''<cpu>
+ <arch>%s</arch>
+ <vendor>%s</vendor>
+ <topology sockets="%d" cores="%d" threads="%d"/>
+ </cpu>''' % (libvirt.node_arch,
+ libvirt.node_cpu_vendor,
+ libvirt.node_sockets,
+ libvirt.node_cores,
+ libvirt.node_threads)
+ self.assertEqual(conn.compareCPU(xml, 0),
+ libvirt.VIR_CPU_COMPARE_IDENTICAL)
diff --git a/nova/tests/unit/virt/libvirt/test_firewall.py b/nova/tests/unit/virt/libvirt/test_firewall.py
new file mode 100644
index 0000000000..b6d4cddf51
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_firewall.py
@@ -0,0 +1,749 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import threading
+import uuid
+from xml.dom import minidom
+
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+
+from nova.compute import utils as compute_utils
+from nova import exception
+from nova.network import linux_net
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova.virt.libvirt import firewall
+from nova.virt import netutils
+from nova.virt import virtapi
+
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
+_ipv4_like = fake_network.ipv4_like
+
+
+class NWFilterFakes:
+ def __init__(self):
+ self.filters = {}
+
+ def nwfilterLookupByName(self, name):
+ if name in self.filters:
+ return self.filters[name]
+ raise libvirt.libvirtError('Filter Not Found')
+
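+ # Mimics nwfilterDefineXML: store the filter keyed by name and refuse
+ # to redefine an existing name under a different uuid.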
+ def filterDefineXMLMock(self, xml):
+ class FakeNWFilterInternal:
+ def __init__(self, parent, name, u, xml):
+ self.name = name
+ self.uuid = u
+ self.parent = parent
+ self.xml = xml
+
+ def XMLDesc(self, flags):
+ return self.xml
+
+ def undefine(self):
+ del self.parent.filters[self.name]
+
+ tree = etree.fromstring(xml)
+ name = tree.get('name')
+ u = tree.find('uuid')
+ if u is None:
+ u = uuid.uuid4().hex
+ else:
+ u = u.text
+ if name not in self.filters:
+ self.filters[name] = FakeNWFilterInternal(self, name, u, xml)
+ else:
+ if self.filters[name].uuid != u:
+ raise libvirt.libvirtError(
+ "Mismatching name '%s' with uuid '%s' vs '%s'"
+ % (name, self.filters[name].uuid, u))
+ self.filters[name].xml = xml
+ return True
+
+
+class FakeVirtAPI(virtapi.VirtAPI):
+ def provider_fw_rule_get_all(self, context):
+ return []
+
+
+class IptablesFirewallTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(IptablesFirewallTestCase, self).setUp()
+
+ class FakeLibvirtDriver(object):
+ def nwfilterDefineXML(*args, **kwargs):
+ """setup_basic_rules in nwfilter calls this."""
+ pass
+
+ self.fake_libvirt_connection = FakeLibvirtDriver()
+ self.fw = firewall.IptablesFirewallDriver(
+ FakeVirtAPI(),
+ get_connection=lambda: self.fake_libvirt_connection)
+
+ in_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
+ '*mangle',
+ ':PREROUTING ACCEPT [241:39722]',
+ ':INPUT ACCEPT [230:39282]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [266:26558]',
+ ':POSTROUTING ACCEPT [267:26590]',
+ '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill',
+ 'COMMIT',
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ in6_filter_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
+ def _create_instance_ref(self,
+ uuid="74526555-9166-4893-a203-126bdcab0d67"):
+ inst = objects.Instance(
+ id=7,
+ uuid=uuid,
+ user_id="fake",
+ project_id="fake",
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ instance_type_id=1)
+ inst.info_cache = objects.InstanceInfoCache()
+ inst.info_cache.deleted = False
+ return inst
+
+ @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupRuleList,
+ "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_static_filters(self, mock_lock, mock_secgroup,
+ mock_secrule, mock_instlist):
+ mock_lock.return_value = threading.Semaphore()
+
+ UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
+ SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
+ instance_ref = self._create_instance_ref(UUID)
+ src_instance_ref = self._create_instance_ref(SRC_UUID)
+
+ secgroup = objects.SecurityGroup(id=1,
+ user_id='fake',
+ project_id='fake',
+ name='testgroup',
+ description='test group')
+
+ src_secgroup = objects.SecurityGroup(id=2,
+ user_id='fake',
+ project_id='fake',
+ name='testsourcegroup',
+ description='src group')
+
+ r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
+ protocol='icmp',
+ from_port=-1,
+ to_port=-1,
+ cidr='192.168.11.0/24',
+ grantee_group=None)
+
+ r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
+ protocol='icmp',
+ from_port=8,
+ to_port=-1,
+ cidr='192.168.11.0/24',
+ grantee_group=None)
+
+ r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
+ protocol='tcp',
+ from_port=80,
+ to_port=81,
+ cidr='192.168.10.0/24',
+ grantee_group=None)
+
+ r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
+ protocol='tcp',
+ from_port=80,
+ to_port=81,
+ cidr=None,
+ grantee_group=src_secgroup,
+ group_id=src_secgroup['id'])
+
+ r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
+ protocol=None,
+ cidr=None,
+ grantee_group=src_secgroup,
+ group_id=src_secgroup['id'])
+
+ secgroup_list = objects.SecurityGroupList()
+ secgroup_list.objects.append(secgroup)
+ src_secgroup_list = objects.SecurityGroupList()
+ src_secgroup_list.objects.append(src_secgroup)
+ instance_ref.security_groups = secgroup_list
+ src_instance_ref.security_groups = src_secgroup_list
+
+ def _fake_secgroup(ctxt, instance):
+ if instance.uuid == UUID:
+ return instance_ref.security_groups
+ else:
+ return src_instance_ref.security_groups
+
+ mock_secgroup.side_effect = _fake_secgroup
+
+ def _fake_secrule(ctxt, id):
+ if id == secgroup.id:
+ rules = objects.SecurityGroupRuleList()
+ rules.objects.extend([r1, r2, r3, r4, r5])
+ return rules
+ else:
+ return []
+
+ mock_secrule.side_effect = _fake_secrule
+
+ def _fake_instlist(ctxt, id):
+ if id == src_secgroup['id']:
+ insts = objects.InstanceList()
+ insts.objects.append(src_instance_ref)
+ return insts
+ else:
+ insts = objects.InstanceList()
+ insts.objects.append(instance_ref)
+ return insts
+
+ mock_instlist.side_effect = _fake_instlist
+
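+ # Return the canned iptables-save output above and capture whatever is
+ # fed to iptables-restore into self.out_rules / self.out6_rules.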
+ def fake_iptables_execute(*cmd, **kwargs):
+ process_input = kwargs.get('process_input', None)
+ if cmd == ('ip6tables-save', '-c'):
+ return '\n'.join(self.in6_filter_rules), None
+ if cmd == ('iptables-save', '-c'):
+ return '\n'.join(self.in_rules), None
+ if cmd == ('iptables-restore', '-c'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out_rules = lines
+ return '', ''
+ if cmd == ('ip6tables-restore', '-c',):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out6_rules = lines
+ return '', ''
+
+ network_model = _fake_network_info(self.stubs, 1)
+
+ linux_net.iptables_manager.execute = fake_iptables_execute
+
+ self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+ lambda instance: network_model)
+
+ self.fw.prepare_instance_filter(instance_ref, network_model)
+ self.fw.apply_instance_filter(instance_ref, network_model)
+
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self.in_rules)
+ for rule in in_rules:
+ if 'nova' not in rule:
+ self.assertTrue(rule in self.out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in self.out_rules:
+ # Crude, but good enough for now: find the per-instance chain from
+ # the '-d <fixed ip> -j <chain>' rule; only the last two octets vary.
+ if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+
+ security_group_chain = None
+ for rule in self.out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
+ '-s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "ICMP acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
+ '--icmp-type 8 -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ for ip in network_model.fixed_ips():
+ if ip['version'] != 4:
+ continue
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
+ '--dports 80:81 -s %s' % ip['address'])
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
+ '%s' % ip['address'])
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "Protocol/port-less acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
+ '-m multiport --dports 80:81 -s 192.168.10.0/24')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 0)
+
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_multinic_iptables(self, mock_lock, mock_secgroup):
+ mock_lock.return_value = threading.Semaphore()
+ mock_secgroup.return_value = objects.SecurityGroupList()
+
+ ipv4_rules_per_addr = 1
+ ipv4_addr_per_network = 2
+ ipv6_rules_per_addr = 1
+ ipv6_addr_per_network = 1
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ network_info = _fake_network_info(self.stubs, networks_count,
+ ipv4_addr_per_network)
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ # Extra rules are for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 2
+ self.assertEqual(ipv4_network_rules, rules)
+ self.assertEqual(ipv6_network_rules,
+ ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_do_refresh_security_group_rules(self, mock_lock):
+ mock_lock.return_value = threading.Semaphore()
+ instance_ref = self._create_instance_ref()
+ self.mox.StubOutWithMock(self.fw,
+ 'instance_rules')
+ self.mox.StubOutWithMock(self.fw,
+ 'add_filters_for_instance',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
+ 'has_chain')
+
+ self.fw.instance_rules(instance_ref,
+ mox.IgnoreArg()).AndReturn((None, None))
+ self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.fw.instance_rules(instance_ref,
+ mox.IgnoreArg()).AndReturn((None, None))
+ self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
+ ).AndReturn(True)
+ self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
+ self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
+ self.fw.do_refresh_security_group_rules("fake")
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_do_refresh_security_group_rules_instance_gone(self, mock_lock):
+ mock_lock.return_value = threading.Semaphore()
+ instance1 = {'id': 1, 'uuid': 'fake-uuid1'}
+ instance2 = {'id': 2, 'uuid': 'fake-uuid2'}
+ self.fw.instance_info = {1: (instance1, 'netinfo1'),
+ 2: (instance2, 'netinfo2')}
+ mock_filter = mock.MagicMock()
+ with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
+ mock_filter.has_chain.return_value = False
+ with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
+ mock_ir.return_value = (None, None)
+ self.fw.do_refresh_security_group_rules('secgroup')
+ self.assertEqual(2, mock_ir.call_count)
+ # NOTE(danms): Make sure that it is checking has_chain each time,
+ # continuing to process all the instances, and never adding the
+ # new chains back if has_chain() is False
+ mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
+ mock.call('inst-2')],
+ any_order=True)
+ self.assertEqual(0, mock_filter.add_chain.call_count)
+
+ @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupRuleList,
+ "get_by_security_group_id")
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_unfilter_instance_undefines_nwfilter(self, mock_lock,
+ mock_secgroup,
+ mock_secrule,
+ mock_instlist):
+ mock_lock.return_value = threading.Semaphore()
+
+ fakefilter = NWFilterFakes()
+ _xml_mock = fakefilter.filterDefineXMLMock
+ self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
+ _lookup_name = fakefilter.nwfilterLookupByName
+ self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
+ instance_ref = self._create_instance_ref()
+
+ mock_secgroup.return_value = objects.SecurityGroupList()
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance_ref, network_info)
+
+ # should undefine just the instance filter
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+ @mock.patch.object(FakeVirtAPI, "provider_fw_rule_get_all")
+ @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
+ @mock.patch.object(lockutils, "external_lock")
+ def test_provider_firewall_rules(self, mock_lock, mock_secgroup,
+ mock_fwrules):
+ mock_lock.return_value = threading.Semaphore()
+ mock_secgroup.return_value = objects.SecurityGroupList()
+
+ # setup basic instance data
+ instance_ref = self._create_instance_ref()
+ # FRAGILE: peeks at how the firewall names chains
+ chain_name = 'inst-%s' % instance_ref['id']
+
+ # create a firewall via setup_basic_filtering like libvirt_conn.spawn
+ # should have a chain with 0 rules
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(0, len(rules))
+
+ # add a rule and send the update message, check for 1 rule
+ mock_fwrules.return_value = [{'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535}]
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+ # Add another, refresh, and make sure number of rules goes to two
+ mock_fwrules.return_value = [{'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535},
+ {'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535}]
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(2, len(rules))
+
+ # create the instance filter and make sure it has a jump rule
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+ inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == chain_name]
+ jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
+ provjump_rules = []
+ # IptablesTable doesn't make rules unique internally
+ for rule in jump_rules:
+ if 'provider' in rule.rule and rule not in provjump_rules:
+ provjump_rules.append(rule)
+ self.assertEqual(1, len(provjump_rules))
+
+ # remove a rule from the db, cast to compute to refresh rule
+ mock_fwrules.return_value = [{'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535}]
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+
+class NWFilterTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(NWFilterTestCase, self).setUp()
+
+ class Mock(object):
+ pass
+
+ self.fake_libvirt_connection = Mock()
+
+ self.fw = firewall.NWFilterFirewall(
+ FakeVirtAPI(),
+ lambda: self.fake_libvirt_connection)
+
+ def _create_security_group(self, instance_ref):
+ secgroup = objects.SecurityGroup(id=1,
+ user_id='fake',
+ project_id='fake',
+ name='testgroup',
+ description='test group description')
+
+ secgroup_list = objects.SecurityGroupList()
+ secgroup_list.objects.append(secgroup)
+ instance_ref.security_groups = secgroup_list
+
+ return secgroup
+
+ def _create_instance(self):
+ inst = objects.Instance(
+ id=7,
+ uuid="74526555-9166-4893-a203-126bdcab0d67",
+ user_id="fake",
+ project_id="fake",
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ instance_type_id=1)
+ inst.info_cache = objects.InstanceInfoCache()
+ inst.info_cache.deleted = False
+ return inst
+
+ def test_creates_base_rule_first(self):
+ # These come pre-defined by libvirt
+ self.defined_filters = ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']
+
+ self.recursive_depends = {}
+ for f in self.defined_filters:
+ self.recursive_depends[f] = []
+
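+ # Record each defined filter's recursive <filterref> dependencies and
+ # assert that every referenced filter has already been defined.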
+ def _filterDefineXMLMock(xml):
+ dom = minidom.parseString(xml)
+ name = dom.firstChild.getAttribute('name')
+ self.recursive_depends[name] = []
+ for f in dom.getElementsByTagName('filterref'):
+ ref = f.getAttribute('filter')
+ self.assertTrue(ref in self.defined_filters,
+ ('%s referenced filter that does ' +
+ 'not yet exist: %s') % (name, ref))
+ dependencies = [ref] + self.recursive_depends[ref]
+ self.recursive_depends[name] += dependencies
+
+ self.defined_filters.append(name)
+ return True
+
+ self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ def _ensure_all_called(mac, allow_dhcp):
+ instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
+ mac.replace(':', ''))
+ requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
+ 'no-mac-spoofing']
+ required_not_list = []
+ if allow_dhcp:
+ requiredlist.append('allow-dhcp-server')
+ else:
+ required_not_list.append('allow-dhcp-server')
+ for required in requiredlist:
+ self.assertTrue(required in
+ self.recursive_depends[instance_filter],
+ "Instance's filter does not include %s" %
+ required)
+ for required_not in required_not_list:
+ self.assertFalse(required_not in
+ self.recursive_depends[instance_filter],
+ "Instance filter includes %s" % required_not)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ # network_info contains a single network, hence a single vif; pass its
+ # MAC to _ensure_all_called() so it can build the instance filter name.
+ mac = network_info[0]['address']
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ allow_dhcp = True
+ _ensure_all_called(mac, allow_dhcp)
+
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ allow_dhcp = False
+ _ensure_all_called(mac, allow_dhcp)
+
+ def test_unfilter_instance_undefines_nwfilters(self):
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance_ref, network_info)
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+ def test_redefining_nwfilters(self):
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+
+ def test_nwfilter_parameters(self):
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+
+ vif = network_info[0]
+ nic_id = vif['address'].replace(':', '')
+ instance_filter_name = self.fw._instance_filter_name(instance_ref,
+ nic_id)
+ f = fakefilter.nwfilterLookupByName(instance_filter_name)
+ tree = etree.fromstring(f.xml)
+
+ for fref in tree.findall('filterref'):
+ parameters = fref.findall('./parameter')
+ for parameter in parameters:
+ subnet_v4, subnet_v6 = vif['network']['subnets']
+ if parameter.get('name') == 'IP':
+ self.assertTrue(_ipv4_like(parameter.get('value'),
+ '192.168'))
+ elif parameter.get('name') == 'DHCPSERVER':
+ dhcp_server = subnet_v4.get('dhcp_server')
+ self.assertEqual(parameter.get('value'), dhcp_server)
+ elif parameter.get('name') == 'RASERVER':
+ ra_server = subnet_v6['gateway']['address'] + "/128"
+ self.assertEqual(parameter.get('value'), ra_server)
+ elif parameter.get('name') == 'PROJNET':
+ ipv4_cidr = subnet_v4['cidr']
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ self.assertEqual(parameter.get('value'), net)
+ elif parameter.get('name') == 'PROJMASK':
+ ipv4_cidr = subnet_v4['cidr']
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ self.assertEqual(parameter.get('value'), mask)
+ elif parameter.get('name') == 'PROJNET6':
+ ipv6_cidr = subnet_v6['cidr']
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ self.assertEqual(parameter.get('value'), net)
+ elif parameter.get('name') == 'PROJMASK6':
+ ipv6_cidr = subnet_v6['cidr']
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ self.assertEqual(parameter.get('value'), prefix)
+ else:
+ raise exception.InvalidParameterValue('unknown parameter '
+ 'in filter')
+
+ def test_multinic_base_filter_selection(self):
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ network_info = _fake_network_info(self.stubs, 2)
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+
+ def assert_filterref(instance, vif, expected=None):
+ expected = expected or []
+ nic_id = vif['address'].replace(':', '')
+ filter_name = self.fw._instance_filter_name(instance, nic_id)
+ f = fakefilter.nwfilterLookupByName(filter_name)
+ tree = etree.fromstring(f.xml)
+ frefs = [fr.get('filter') for fr in tree.findall('filterref')]
+ self.assertEqual(set(expected), set(frefs))
+
+ assert_filterref(instance_ref, network_info[0],
+ expected=['nova-base'])
+ assert_filterref(instance_ref, network_info[1],
+ expected=['nova-nodhcp'])
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
new file mode 100644
index 0000000000..e865c165da
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -0,0 +1,1309 @@
+# Copyright 2012 Grid Dynamics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import inspect
+import os
+import shutil
+import tempfile
+
+import fixtures
+import mock
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova import context
+from nova import exception
+from nova import keymgr
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import imageutils
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_processutils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import images
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+
+CONF = cfg.CONF
+CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
+
+
+class _ImageTestCase(object):
+
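+ # Replace create_image() so that cache() simply invokes the fetch
+ # function with the template/base path as the target.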
+ def mock_create_image(self, image):
+ def create_image(fn, base, size, *args, **kwargs):
+ fn(target=base, *args, **kwargs)
+ image.create_image = create_image
+
+ def setUp(self):
+ super(_ImageTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instances_path=self.INSTANCES_PATH)
+ self.INSTANCE = {'name': 'instance',
+ 'uuid': uuidutils.generate_uuid()}
+ self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
+ self.INSTANCE['uuid'], 'disk.info')
+ self.NAME = 'fake.vm'
+ self.TEMPLATE = 'template'
+ self.CONTEXT = context.get_admin_context()
+
+ self.OLD_STYLE_INSTANCE_PATH = \
+ fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
+ self.PATH = os.path.join(
+ fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
+
+ # TODO(mikal): rename template_dir to base_dir and template_path
+ # to cached_image_path. This will be less confusing.
+ self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
+ self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def tearDown(self):
+ super(_ImageTestCase, self).tearDown()
+ shutil.rmtree(self.INSTANCES_PATH)
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: True)
+
+ # Cache twice to verify the fallocate capability probe only runs once.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
+
+ def test_prealloc_image_without_write_access(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+ self.stubs.Set(image, '_can_fallocate', lambda: True)
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: False)
+
+ # The fallocate probe is only attempted when the user has write access.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class RawTestCase(_ImageTestCase, test.NoDBTestCase):
+
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Raw
+ super(RawTestCase, self).setUp()
+ self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+ '__call__')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
+ self.mox.StubOutWithMock(imagebackend.disk, 'extend')
+ return fn
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.prepare_mocks()
+ fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_generated(self):
+ fn = self.prepare_mocks()
+ fn(target=self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(images, 'qemu_img_info',
+ return_value=imageutils.QemuImgInfo())
+ def test_create_image_extend(self, fake_qemu_img_info):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
+ imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
+
+ self.mox.VerifyAll()
+
+ def test_correct_format(self):
+ self.stubs.UnsetAll()
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
+
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ info = self.mox.CreateMockAnything()
+ info.file_format = 'foo'
+ imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
+ self.assertEqual(image.driver_format, 'foo')
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(images, 'qemu_img_info',
+ side_effect=exception.InvalidDiskInfo(
+ reason='invalid path'))
+ def test_resolve_driver_format(self, fake_qemu_img_info):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'raw')
+
+
+class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
+ SIZE = units.Gi
+
+ def setUp(self):
+ self.image_class = imagebackend.Qcow2
+ super(Qcow2TestCase, self).setUp()
+ self.QCOW2_BASE = (self.TEMPLATE_PATH +
+ '_%d' % (self.SIZE / units.Gi))
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+ '__call__')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'create_cow_image')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
+ self.mox.StubOutWithMock(imagebackend.disk, 'extend')
+ return fn
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.prepare_mocks()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+ imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
+ self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_with_size(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
+ self.PATH)
+ imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_too_small(self):
+ fn = self.prepare_mocks()
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.SIZE)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ image.create_image, fn, self.TEMPLATE_PATH, 1)
+ self.mox.VerifyAll()
+
+ def test_generate_resized_backing_files(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'get_disk_backing_file')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(True)
+
+ imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
+ .AndReturn(self.QCOW2_BASE)
+ os.path.exists(self.QCOW2_BASE).AndReturn(False)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
+ self.QCOW2_BASE)
+ imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
+
+ os.path.exists(self.PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_qcow2_exists_and_has_no_backing_file(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'get_disk_backing_file')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(True)
+
+ imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
+ .AndReturn(None)
+ os.path.exists(self.PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_resolve_driver_format(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'qcow2')
+
+
+class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
+ VG = 'FakeVG'
+ TEMPLATE_SIZE = 512
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Lvm
+ super(LvmTestCase, self).setUp()
+ self.flags(images_volume_group=self.VG, group='libvirt')
+ self.flags(enabled=False, group='ephemeral_storage_encryption')
+ self.INSTANCE['ephemeral_key_uuid'] = None
+ self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
+ self.OLD_STYLE_INSTANCE_PATH = None
+ self.PATH = os.path.join('/dev', self.VG, self.LV)
+ self.disk = imagebackend.disk
+ self.utils = imagebackend.utils
+ self.lvm = imagebackend.lvm
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(self.disk, 'resize2fs')
+ self.mox.StubOutWithMock(self.lvm, 'create_volume')
+ self.mox.StubOutWithMock(self.disk, 'get_disk_size')
+ self.mox.StubOutWithMock(self.utils, 'execute')
+ return fn
+
+ def _create_image(self, sparse):
+ fn = self.prepare_mocks()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.TEMPLATE_SIZE,
+ sparse=sparse)
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute(*cmd, run_as_root=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ def _create_image_generated(self, sparse):
+ fn = self.prepare_mocks()
+ self.lvm.create_volume(self.VG, self.LV,
+ self.SIZE, sparse=sparse)
+ fn(target=self.PATH, ephemeral_size=None)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH,
+ self.SIZE, ephemeral_size=None)
+
+ self.mox.VerifyAll()
+
+ def _create_image_resize(self, sparse):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG, self.LV,
+ self.SIZE, sparse=sparse)
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute(*cmd, run_as_root=True)
+ self.disk.resize2fs(self.PATH, run_as_root=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ self._create_image(False)
+
+ def test_create_image_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image(True)
+
+ def test_create_image_generated(self):
+ self._create_image_generated(False)
+
+ def test_create_image_generated_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_generated(True)
+
+ def test_create_image_resize(self):
+ self._create_image_resize(False)
+
+ def test_create_image_resize_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_resize(True)
+
+ def test_create_image_negative(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False
+ ).AndRaise(RuntimeError())
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
+ self.lvm.remove_volumes([self.PATH])
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.assertRaises(RuntimeError, image.create_image, fn,
+ self.TEMPLATE_PATH, self.SIZE)
+ self.mox.VerifyAll()
+
+ def test_create_image_generated_negative(self):
+ fn = self.prepare_mocks()
+ fn(target=self.PATH,
+ ephemeral_size=None).AndRaise(RuntimeError())
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
+ self.lvm.remove_volumes([self.PATH])
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.assertRaises(RuntimeError, image.create_image, fn,
+ self.TEMPLATE_PATH, self.SIZE,
+ ephemeral_size=None)
+ self.mox.VerifyAll()
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
+ VG = 'FakeVG'
+ TEMPLATE_SIZE = 512
+ SIZE = 1024
+
+ def setUp(self):
+ super(EncryptedLvmTestCase, self).setUp()
+ self.image_class = imagebackend.Lvm
+ self.flags(enabled=True, group='ephemeral_storage_encryption')
+ self.flags(cipher='aes-xts-plain64',
+ group='ephemeral_storage_encryption')
+ self.flags(key_size=512, group='ephemeral_storage_encryption')
+ self.flags(fixed_key='00000000000000000000000000000000'
+ '00000000000000000000000000000000',
+ group='keymgr')
+ self.flags(images_volume_group=self.VG, group='libvirt')
+ self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
+ self.OLD_STYLE_INSTANCE_PATH = None
+ self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
+ self.PATH = os.path.join('/dev/mapper',
+ imagebackend.dmcrypt.volume_name(self.LV))
+ self.key_manager = keymgr.API()
+ self.INSTANCE['ephemeral_key_uuid'] =\
+ self.key_manager.create_key(self.CONTEXT)
+ self.KEY = self.key_manager.get_key(self.CONTEXT,
+ self.INSTANCE['ephemeral_key_uuid']).get_encoded()
+
+ self.lvm = imagebackend.lvm
+ self.disk = imagebackend.disk
+ self.utils = imagebackend.utils
+ self.libvirt_utils = imagebackend.libvirt_utils
+ self.dmcrypt = imagebackend.dmcrypt
+
+ def _create_image(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(context=self.CONTEXT,
+ max_size=self.TEMPLATE_SIZE,
+ target=self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(self.VG,
+ self.LV,
+ self.TEMPLATE_SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ cmd = ('qemu-img',
+ 'convert',
+ '-O',
+ 'raw',
+ self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute.assert_called_with(*cmd, run_as_root=True)
+
+ def _create_image_generated(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ fn.assert_called_with(target=self.PATH,
+ ephemeral_size=None, context=self.CONTEXT)
+
+ def _create_image_resize(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ cmd = ('qemu-img',
+ 'convert',
+ '-O',
+ 'raw',
+ self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute.assert_called_with(*cmd, run_as_root=True)
+ self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
+
+ def test_create_image(self):
+ self._create_image(False)
+
+ def test_create_image_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image(True)
+
+ def test_create_image_generated(self):
+ self._create_image_generated(False)
+
+ def test_create_image_generated_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_generated(True)
+
+ def test_create_image_resize(self):
+ self._create_image_resize(False)
+
+ def test_create_image_resize_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_resize(True)
+
+ def test_create_image_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ self.lvm.create_volume.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(
+ context=self.CONTEXT,
+ max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(
+ self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_encrypt_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ self.dmcrypt.create_volume.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(
+ context=self.CONTEXT,
+ max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.dmcrypt.volume_name(self.LV),
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_generated_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ fn.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ fn.assert_called_with(
+ target=self.PATH,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_generated_encrypt_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ fn.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_prealloc_image(self):
+ self.flags(preallocate_images='space')
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
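The EncryptedLvm cases above stack many mock.patch.object calls inside contextlib.nested, which is deprecated as of Python 2.7; the same stubbing can be spelled as a single multi-context with statement. A minimal, self-contained sketch of both forms, using a throwaway _Volumes helper rather than the real lvm/dmcrypt modules:

    import contextlib

    import mock


    class _Volumes(object):
        # Stand-in for the lvm/dmcrypt helpers patched in the tests above.
        def create_volume(self, vg, lv, size, sparse=False):
            raise RuntimeError('should never run for real in a unit test')

        def remove_volumes(self, paths):
            raise RuntimeError('should never run for real in a unit test')


    volumes = _Volumes()

    # Deprecated spelling, as used above:
    with contextlib.nested(
            mock.patch.object(volumes, 'create_volume'),
            mock.patch.object(volumes, 'remove_volumes')) as (create, remove):
        volumes.create_volume('vg', 'lv', 1024, sparse=True)
        create.assert_called_with('vg', 'lv', 1024, sparse=True)
        assert remove.call_count == 0

    # Equivalent single with statement (Python 2.7+):
    with mock.patch.object(volumes, 'create_volume') as create, \
            mock.patch.object(volumes, 'remove_volumes') as remove:
        volumes.create_volume('vg', 'lv', 1024, sparse=False)
        create.assert_called_with('vg', 'lv', 1024, sparse=False)
        assert remove.call_count == 0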
+class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
+ POOL = "FakePool"
+ USER = "FakeUser"
+ CONF = "FakeConf"
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Rbd
+ super(RbdTestCase, self).setUp()
+ self.flags(images_rbd_pool=self.POOL,
+ rbd_user=self.USER,
+ images_rbd_ceph_conf=self.CONF,
+ group='libvirt')
+ self.libvirt_utils = imagebackend.libvirt_utils
+ self.utils = imagebackend.utils
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
+
+ def test_cache(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.mox.CreateMockAnything()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ self.mox.ReplayAll()
+
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_resize(self):
+ fn = self.mox.CreateMockAnything()
+ full_size = self.SIZE * 2
+ fn(max_size=full_size, target=self.TEMPLATE_PATH)
+
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+ self.mox.StubOutWithMock(image.driver, 'resize')
+ image.driver.resize(rbd_name, full_size)
+
+ self.mox.ReplayAll()
+
+ image.create_image(fn, self.TEMPLATE_PATH, full_size)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_already_exists(self):
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(True)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
+ image.check_image_exists().AndReturn(True)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+
+ self.mox.ReplayAll()
+
+ fn = self.mox.CreateMockAnything()
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ def fake_resize(rbd_name, size):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+ def test_parent_compatible(self):
+ self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
+ inspect.getargspec(self.image_class.libvirt_info))
+
+ def test_image_path(self):
+ conf = "FakeConf"
+ pool = "FakePool"
+ user = "FakeUser"
+
+ self.flags(images_rbd_pool=pool, group='libvirt')
+ self.flags(images_rbd_ceph_conf=conf, group='libvirt')
+ self.flags(rbd_user=user, group='libvirt')
+ image = self.image_class(self.INSTANCE, self.NAME)
+ rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
+ user, conf)
+
+ self.assertEqual(image.path, rbd_path)
+
+
+class BackendTestCase(test.NoDBTestCase):
+ INSTANCE = {'name': 'fake-instance',
+ 'uuid': uuidutils.generate_uuid()}
+ NAME = 'fake-name.suffix'
+
+ def setUp(self):
+ super(BackendTestCase, self).setUp()
+ self.flags(enabled=False, group='ephemeral_storage_encryption')
+ self.INSTANCE['ephemeral_key_uuid'] = None
+
+ def get_image(self, use_cow, image_type):
+ return imagebackend.Backend(use_cow).image(self.INSTANCE,
+ self.NAME,
+ image_type)
+
+ def _test_image(self, image_type, image_not_cow, image_cow):
+ image1 = self.get_image(False, image_type)
+ image2 = self.get_image(True, image_type)
+
+ def assertIsInstance(instance, class_object):
+ failure = ('Expected %s,' +
+ ' but got %s.') % (class_object.__name__,
+ instance.__class__.__name__)
+ self.assertIsInstance(instance, class_object, msg=failure)
+
+ assertIsInstance(image1, image_not_cow)
+ assertIsInstance(image2, image_cow)
+
+ def test_image_raw(self):
+ self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
+
+ def test_image_qcow2(self):
+ self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
+
+ def test_image_lvm(self):
+ self.flags(images_volume_group='FakeVG', group='libvirt')
+ self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
+
+ def test_image_rbd(self):
+ conf = "FakeConf"
+ pool = "FakePool"
+ self.flags(images_rbd_pool=pool, group='libvirt')
+ self.flags(images_rbd_ceph_conf=conf, group='libvirt')
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
+ self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
+
+ def test_image_default(self):
+ self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
+
+
+class UtilTestCase(test.NoDBTestCase):
+ def test_get_hw_disk_discard(self):
+ self.assertEqual('unmap', imagebackend.get_hw_disk_discard("unmap"))
+ self.assertEqual('ignore', imagebackend.get_hw_disk_discard("ignore"))
+ self.assertIsNone(imagebackend.get_hw_disk_discard(None))
+ self.assertRaises(RuntimeError, imagebackend.get_hw_disk_discard,
+ "fake")
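The cases above that do not use mock follow mox's record/replay/verify protocol: stub the collaborator, record the exact calls expected together with their canned results, switch to replay mode, exercise the code, then verify that the recorded script was consumed. A minimal, self-contained sketch of that cycle against os.path.exists; the paths are placeholders chosen for illustration, not anything Nova uses.

    import os

    import mox


    m = mox.Mox()
    m.StubOutWithMock(os.path, 'exists')

    # Record phase: each call made now is an expectation, not a real call.
    os.path.exists('/fake/_base').AndReturn(True)
    os.path.exists('/fake/_base/abc123').AndReturn(False)

    m.ReplayAll()

    # Replay phase: the stub answers from the recorded script. Unrecorded or
    # out-of-order calls raise immediately; expectations left unconsumed make
    # VerifyAll() fail.
    assert os.path.exists('/fake/_base')
    assert not os.path.exists('/fake/_base/abc123')

    m.VerifyAll()
    m.UnsetStubs()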
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
new file mode 100644
index 0000000000..d7bed2fcd0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -0,0 +1,887 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import cStringIO
+import hashlib
+import os
+import time
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova import conductor
+from nova import db
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+from nova.virt.libvirt import imagecache
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+
+
+@contextlib.contextmanager
+def intercept_log_messages():
+ try:
+ mylog = logging.getLogger('nova')
+ stream = cStringIO.StringIO()
+ handler = logging.logging.StreamHandler(stream)
+ handler.setFormatter(logging.ContextFormatter())
+ mylog.logger.addHandler(handler)
+ yield stream
+ finally:
+ mylog.logger.removeHandler(handler)
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ImageCacheManagerTestCase, self).setUp()
+ self.stock_instance_names = set(['instance-00000001',
+ 'instance-00000002',
+ 'instance-00000003',
+ 'banana-42-hamster'])
+
+ def test_read_stored_checksum_missing(self):
+ self.stubs.Set(os.path, 'exists', lambda x: False)
+ csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
+ self.assertIsNone(csum)
+
+ def test_read_stored_checksum(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
+ fname = os.path.join(tmpdir, 'aaa')
+ info_fname = imagecache.get_info_filename(fname)
+ f = open(info_fname, 'w')
+ f.write(csum_input)
+ f.close()
+
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
+ self.assertEqual(csum_input.rstrip(),
+ '{"sha1": "%s"}' % csum_output)
+
+ def test_read_stored_checksum_legacy_essex(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+ old_fname = fname + '.sha1'
+ f = open(old_fname, 'w')
+ f.write('fdghkfhkgjjksfdgjksjkghsdf')
+ f.close()
+
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
+ self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
+ self.assertFalse(os.path.exists(old_fname))
+ info_fname = imagecache.get_info_filename(fname)
+ self.assertTrue(os.path.exists(info_fname))
+
+ def test_list_base_images(self):
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
+ '00000004']
+ images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
+ 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3',
+ '17d1b00b81642842e514494a78e804e9a511637c',
+ '17d1b00b81642842e514494a78e804e9a511637c_5368709120',
+ '17d1b00b81642842e514494a78e804e9a511637c_10737418240']
+ listing.extend(images)
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ self.flags(instances_path='/var/lib/nova/instances')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+
+ sanitized = []
+ for ent in image_cache_manager.unexplained_images:
+ sanitized.append(ent.replace(base_dir + '/', ''))
+
+ self.assertEqual(sorted(sanitized), sorted(images))
+
+ expected = os.path.join(base_dir,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3')
+ self.assertIn(expected, image_cache_manager.unexplained_images)
+
+ expected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c_'
+ '10737418240')
+ self.assertIn(expected, image_cache_manager.unexplained_images)
+
+ unexpected = os.path.join(base_dir, '00000004')
+ self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
+
+ for ent in image_cache_manager.unexplained_images:
+ self.assertTrue(ent.startswith(base_dir))
+
+ self.assertEqual(len(image_cache_manager.originals), 2)
+
+ expected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c')
+ self.assertIn(expected, image_cache_manager.originals)
+
+ unexpected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c_'
+ '10737418240')
+ self.assertNotIn(unexpected, image_cache_manager.originals)
+
+ def test_list_backing_images_small(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_resized(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240'))
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_instancename(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'banana-42-hamster'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('banana-42-hamster') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_disk_notexist(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'banana-42-hamster'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('banana-42-hamster') != -1)
+
+ def fake_get_disk(disk_path):
+ raise processutils.ProcessExecutionError()
+
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = []
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ image_cache_manager._list_backing_images)
+
+ def test_find_base_file_nothing(self):
+ self.stubs.Set(os.path, 'exists', lambda x: False)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ fingerprint = '549867354867'
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ self.assertEqual(0, len(res))
+
+ def test_find_base_file_small(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.endswith('%s_sm' % fingerprint))
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file = os.path.join(base_dir, fingerprint + '_sm')
+ self.assertEqual(res, [(base_file, True, False)])
+
+ def test_find_base_file_resized(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
+ '00000004']
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.endswith('%s_10737418240' % fingerprint))
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file = os.path.join(base_dir, fingerprint + '_10737418240')
+ self.assertEqual(res, [(base_file, False, True)])
+
+ def test_find_base_file_all(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
+ '00000004']
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file1 = os.path.join(base_dir, fingerprint)
+ base_file2 = os.path.join(base_dir, fingerprint + '_sm')
+ base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
+ self.assertEqual(res, [(base_file1, False, False),
+ (base_file2, True, False),
+ (base_file3, False, True)])
+
+ @contextlib.contextmanager
+ def _make_base_file(self, checksum=True):
+ """Make a base file for testing."""
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+ fname = os.path.join(tmpdir, 'aaa')
+
+ base_file = open(fname, 'w')
+ base_file.write('data')
+ base_file.close()
+ base_file = open(fname, 'r')
+
+ if checksum:
+ imagecache.write_stored_checksum(fname)
+
+ base_file.close()
+ yield fname
+
+ def test_remove_base_file(self):
+ with self._make_base_file() as fname:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+ info_fname = imagecache.get_info_filename(fname)
+
+ # Files are initially too new to delete
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # Old files get cleaned up though
+ os.utime(fname, (-1, time.time() - 3601))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertFalse(os.path.exists(fname))
+ self.assertFalse(os.path.exists(info_fname))
+
+ def test_remove_base_file_original(self):
+ with self._make_base_file() as fname:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.originals = [fname]
+ image_cache_manager._remove_base_file(fname)
+ info_fname = imagecache.get_info_filename(fname)
+
+ # Files are initially too new to delete
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # This file should stay longer than a resized image
+ os.utime(fname, (-1, time.time() - 3601))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # Originals don't stay forever though
+ os.utime(fname, (-1, time.time() - 3600 * 25))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertFalse(os.path.exists(fname))
+ self.assertFalse(os.path.exists(info_fname))
+
+ def test_remove_base_file_dne(self):
+ # This test is solely to execute the "does not exist" code path. We
+ # don't expect the method being tested to do anything in this case.
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+
+ def test_remove_base_file_oserror(self):
+ with intercept_log_messages() as stream:
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+
+ os.mkdir(fname)
+ os.utime(fname, (-1, time.time() - 3601))
+
+ # This will raise an OSError because of file permissions
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertTrue(os.path.exists(fname))
+ self.assertNotEqual(stream.getvalue().find('Failed to remove'),
+ -1)
+
+ def test_handle_base_image_unused(self):
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files,
+ [fname])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_used(self):
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_used_remotely(self):
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_absent(self):
+ img = '123'
+
+ with intercept_log_messages() as stream:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, None)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+ self.assertNotEqual(stream.getvalue().find('an absent base file'),
+ -1)
+
+ def test_handle_base_image_used_missing(self):
+ img = '123'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_checksum_fails(self):
+ self.flags(checksum_base_images=True, group='libvirt')
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+
+ img = '123'
+
+ with self._make_base_file() as fname:
+ with open(fname, 'w') as f:
+ f.write('banana')
+
+ d = {'sha1': '21323454'}
+ with open('%s.info' % fname, 'w') as f:
+ f.write(jsonutils.dumps(d))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files,
+ [fname])
+
+ def test_verify_base_images(self):
+ hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
+ hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
+ hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
+ hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
+
+ self.flags(instances_path='/instance_path',
+ image_cache_subdirectory_name='_base')
+
+ base_file_list = ['00000001',
+ 'ephemeral_0_20_None',
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
+ 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
+ hashed_42,
+ hashed_1,
+ hashed_21,
+ hashed_22,
+ '%s_5368709120' % hashed_1,
+ '%s_10737418240' % hashed_1,
+ '00000004']
+
+ def fq_path(path):
+ return os.path.join('/instance_path/_base/', path)
+
+ # Fake base directory existence
+ orig_exists = os.path.exists
+
+ def exists(path):
+ # The python coverage tool got angry with my overly broad mocks
+ if not path.startswith('/instance_path'):
+ return orig_exists(path)
+
+ if path in ['/instance_path',
+ '/instance_path/_base',
+ '/instance_path/instance-1/disk',
+ '/instance_path/instance-2/disk',
+ '/instance_path/instance-3/disk',
+ '/instance_path/_base/%s.info' % hashed_42]:
+ return True
+
+ for p in base_file_list:
+ if path == fq_path(p):
+ return True
+ if path == fq_path(p) + '.info':
+ return False
+
+ if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
+ hashed_21,
+ hashed_22,
+ hashed_42]]:
+ return False
+
+ self.fail('Unexpected path existence check: %s' % path)
+
+ self.stubs.Set(os.path, 'exists', lambda x: exists(x))
+
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+
+ # We need to stub utime as well
+ self.stubs.Set(os, 'utime', lambda x, y: None)
+
+ # Fake up some instances in the instances directory
+ orig_listdir = os.listdir
+
+ def listdir(path):
+ # The python coverage tool got angry with my overly broad mocks
+ if not path.startswith('/instance_path'):
+ return orig_listdir(path)
+
+ if path == '/instance_path':
+ return ['instance-1', 'instance-2', 'instance-3', '_base']
+
+ if path == '/instance_path/_base':
+ return base_file_list
+
+ self.fail('Unexpected directory listed: %s' % path)
+
+ self.stubs.Set(os, 'listdir', lambda x: listdir(x))
+
+ # Fake isfile for these faked images in _base
+ orig_isfile = os.path.isfile
+
+ def isfile(path):
+ # The python coverage tool got angry with my overly broad mocks
+ if not path.startswith('/instance_path'):
+ return orig_isfile(path)
+
+ for p in base_file_list:
+ if path == fq_path(p):
+ return True
+
+ self.fail('Unexpected isfile call: %s' % path)
+
+ self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
+
+ # Fake the database call which lists running instances
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '1',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': CONF.host,
+ 'name': 'instance-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+ image_cache_manager = imagecache.ImageCacheManager()
+
+ # Fake the utils call which finds the backing image
+ def get_disk_backing_file(path):
+ if path in ['/instance_path/instance-1/disk',
+ '/instance_path/instance-2/disk']:
+ return fq_path('%s_5368709120' % hashed_1)
+ self.fail('Unexpected backing file lookup: %s' % path)
+
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: get_disk_backing_file(x))
+
+ # Fake out verifying checksums, as that is tested elsewhere
+ self.stubs.Set(image_cache_manager, '_verify_checksum',
+ lambda x, y: True)
+
+ # Fake getmtime as well
+ orig_getmtime = os.path.getmtime
+
+ def getmtime(path):
+ if not path.startswith('/instance_path'):
+ return orig_getmtime(path)
+
+ return 1000000
+
+ self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))
+
+ # Make sure we don't accidentally remove a real file
+ orig_remove = os.remove
+
+ def remove(path):
+ if not path.startswith('/instance_path'):
+ return orig_remove(path)
+
+ # Don't try to remove fake files
+ return
+
+ self.stubs.Set(os, 'remove', lambda x: remove(x))
+
+ # And finally we can make the call we're actually testing...
+ # The argument here should be a context, but it is mocked out
+ image_cache_manager.update(None, all_instances)
+
+ # Verify
+ active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
+ fq_path(hashed_21), fq_path(hashed_22)]
+ for act in active:
+ self.assertIn(act, image_cache_manager.active_base_files)
+ self.assertEqual(len(image_cache_manager.active_base_files),
+ len(active))
+
+ for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
+ fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
+ fq_path(hashed_42),
+ fq_path('%s_10737418240' % hashed_1)]:
+ self.assertIn(rem, image_cache_manager.removable_base_files)
+
+ # Ensure there are no "corrupt" images as well
+ self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
+
+ def test_verify_base_images_no_base(self):
+ self.flags(instances_path='/tmp/no/such/dir/name/please')
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.update(None, [])
+
+ def test_is_valid_info_file(self):
+ hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
+
+ self.flags(instances_path='/tmp/no/such/dir/name/please')
+ self.flags(image_info_filename_pattern=('$instances_path/_base/'
+ '%(image)s.info'),
+ group='libvirt')
+ base_filename = os.path.join(CONF.instances_path, '_base', hashed)
+
+ is_valid_info_file = imagecache.is_valid_info_file
+ self.assertFalse(is_valid_info_file('banana'))
+ self.assertFalse(is_valid_info_file(
+ os.path.join(CONF.instances_path, '_base', '00000001')))
+ self.assertFalse(is_valid_info_file(base_filename))
+ self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
+ self.assertTrue(is_valid_info_file(base_filename + '.info'))
+
+ def test_configured_checksum_path(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ # Ensure there is a base directory
+ os.mkdir(os.path.join(tmpdir, '_base'))
+
+ # Fake the database call which lists running instances
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = []
+ for instance in instances:
+ all_instances.append(fake_instance.fake_instance_obj(
+ None, **instance))
+
+ def touch(filename):
+ f = open(filename, 'w')
+ f.write('Touched')
+ f.close()
+
+ old = time.time() - (25 * 3600)
+ hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
+ base_filename = os.path.join(tmpdir, hashed)
+ touch(base_filename)
+ touch(base_filename + '.info')
+ os.utime(base_filename + '.info', (old, old))
+ touch(base_filename + '.info')
+ os.utime(base_filename + '.info', (old, old))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.update(None, all_instances)
+
+ self.assertTrue(os.path.exists(base_filename))
+ self.assertTrue(os.path.exists(base_filename + '.info'))
+
+ def test_compute_manager(self):
+ was = {'called': False}
+
+ def fake_get_all_by_filters(context, *args, **kwargs):
+ was['called'] = True
+ instances = []
+ for x in xrange(2):
+ instances.append(fake_instance.fake_db_instance(
+ image_ref='1',
+ uuid=x,
+ name=x,
+ vm_state='',
+ task_state=''))
+ return instances
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all_by_filters)
+ compute = importutils.import_object(CONF.compute_manager)
+ self.flags(use_local=True, group='conductor')
+ compute.conductor_api = conductor.API()
+ compute._run_image_cache_manager_pass(None)
+ self.assertTrue(was['called'])
+
+
+class VerifyChecksumTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VerifyChecksumTestCase, self).setUp()
+ self.img = {'container_format': 'ami', 'id': '42'}
+ self.flags(checksum_base_images=True, group='libvirt')
+
+ def _make_checksum(self, tmpdir):
+ testdata = ('OpenStack Software delivers a massively scalable cloud '
+ 'operating system.')
+
+ fname = os.path.join(tmpdir, 'aaa')
+ info_fname = imagecache.get_info_filename(fname)
+
+ with open(fname, 'w') as f:
+ f.write(testdata)
+
+ return fname, info_fname, testdata
+
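+ # Writes the .info file with a valid sha1, non-JSON junk, or a wrong sha1, depending on info_attr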
+ def _write_file(self, info_fname, info_attr, testdata):
+ f = open(info_fname, 'w')
+ if info_attr == "csum valid":
+ csum = hashlib.sha1()
+ csum.update(testdata)
+ f.write('{"sha1": "%s"}\n' % csum.hexdigest())
+ elif info_attr == "csum invalid, not json":
+ f.write('banana')
+ else:
+ f.write('{"sha1": "banana"}')
+ f.close()
+
+ def _check_body(self, tmpdir, info_attr):
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+ fname, info_fname, testdata = self._make_checksum(tmpdir)
+ self._write_file(info_fname, info_attr, testdata)
+ image_cache_manager = imagecache.ImageCacheManager()
+ return image_cache_manager, fname
+
+ def test_verify_checksum(self):
+ with utils.tempdir() as tmpdir:
+ image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
+ res = image_cache_manager._verify_checksum(self.img, fname)
+ self.assertTrue(res)
+
+ def test_verify_checksum_disabled(self):
+ self.flags(checksum_base_images=False, group='libvirt')
+ with utils.tempdir() as tmpdir:
+ image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
+ res = image_cache_manager._verify_checksum(self.img, fname)
+ self.assertIsNone(res)
+
+ def test_verify_checksum_invalid_json(self):
+ with intercept_log_messages() as stream:
+ with utils.tempdir() as tmpdir:
+ image_cache_manager, fname = (
+ self._check_body(tmpdir, "csum invalid, not json"))
+ res = image_cache_manager._verify_checksum(
+ self.img, fname, create_if_missing=False)
+ self.assertFalse(res)
+ log = stream.getvalue()
+
+ # NOTE(mikal): this is a skip not a fail because the file is
+ # present, but is not in valid json format and therefore is
+ # skipped.
+ self.assertNotEqual(log.find('image verification skipped'), -1)
+
+ def test_verify_checksum_invalid_repaired(self):
+ with utils.tempdir() as tmpdir:
+ image_cache_manager, fname = (
+ self._check_body(tmpdir, "csum invalid, not json"))
+ res = image_cache_manager._verify_checksum(
+ self.img, fname, create_if_missing=True)
+ self.assertIsNone(res)
+
+ def test_verify_checksum_invalid(self):
+ with intercept_log_messages() as stream:
+ with utils.tempdir() as tmpdir:
+ image_cache_manager, fname = (
+ self._check_body(tmpdir, "csum invalid, valid json"))
+ res = image_cache_manager._verify_checksum(self.img, fname)
+ self.assertFalse(res)
+ log = stream.getvalue()
+ self.assertNotEqual(log.find('image verification failed'), -1)
+
+ def test_verify_checksum_file_missing(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+ fname, info_fname, testdata = self._make_checksum(tmpdir)
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = image_cache_manager._verify_checksum('aaa', fname)
+ self.assertIsNone(res)
+
+ # Checksum requests for a file with no checksum now have the
+ # side effect of creating the checksum
+ self.assertTrue(os.path.exists(info_fname))
diff --git a/nova/tests/unit/virt/libvirt/test_lvm.py b/nova/tests/unit/virt/libvirt/test_lvm.py
new file mode 100644
index 0000000000..fdb3e4b9f6
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_lvm.py
@@ -0,0 +1,183 @@
+# Copyright 2012 NTT Data. All Rights Reserved.
+# Copyright 2012 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt.libvirt import lvm
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+
+
+class LvmTestCase(test.NoDBTestCase):
+ def test_get_volume_size(self):
+ executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ executes.append(cmd)
+ return 123456789, None
+
+ expected_commands = [('blockdev', '--getsize64', '/dev/foo')]
+ self.stubs.Set(utils, 'execute', fake_execute)
+ size = lvm.get_volume_size('/dev/foo')
+ self.assertEqual(expected_commands, executes)
+ self.assertEqual(size, 123456789)
+
+ @mock.patch.object(utils, 'execute',
+ side_effect=processutils.ProcessExecutionError(
+ stderr=('blockdev: cannot open /dev/foo: '
+ 'No such device or address')))
+ def test_get_volume_size_not_found(self, mock_execute):
+ self.assertRaises(exception.VolumeBDMPathNotFound,
+ lvm.get_volume_size, '/dev/foo')
+
+ @mock.patch.object(utils, 'execute',
+ side_effect=processutils.ProcessExecutionError(
+ stderr='blockdev: i am sad in other ways'))
+ def test_get_volume_size_unexpected_error(self, mock_execute):
+ self.assertRaises(processutils.ProcessExecutionError,
+ lvm.get_volume_size, '/dev/foo')
+
+ def test_lvm_clear(self):
+ def fake_lvm_size(path):
+ return lvm_size
+
+ def fake_execute(*cmd, **kwargs):
+ executes.append(cmd)
+
+ self.stubs.Set(lvm, 'get_volume_size', fake_lvm_size)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ # Test the correct dd commands are run for various sizes
+ lvm_size = 1
+ executes = []
+ expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v1',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v1')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1024
+ executes = []
+ expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v2',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v2')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1025
+ executes = []
+ expected_commands = [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v3',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v3',
+ 'seek=1024', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v3')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v4',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v4')
+ self.assertEqual(expected_commands, executes)
+
+ lvm_size = 1048577
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v5',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v5',
+ 'seek=1048576', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v5')
+ self.assertEqual(expected_commands, executes)
+
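+ # 1234567 bytes = 1 MiB + 181 KiB + 647 B, so three dd passes are expected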
+ lvm_size = 1234567
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ expected_commands += [('dd', 'bs=1024', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=1024', 'count=181', 'conv=fdatasync')]
+ expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v6',
+ 'seek=1233920', 'count=647', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/v6')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear_size limits the size
+ lvm_size = 10485761
+ CONF.set_override('volume_clear_size', '1', 'libvirt')
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v7',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v7')
+ self.assertEqual(expected_commands, executes)
+
+ CONF.set_override('volume_clear_size', '2', 'libvirt')
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero', 'of=/dev/v9',
+ 'seek=0', 'count=1', 'oflag=direct')]
+ lvm.clear_volume('/dev/v9')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=shred
+ CONF.set_override('volume_clear', 'shred', 'libvirt')
+ CONF.set_override('volume_clear_size', '0', 'libvirt')
+ lvm_size = 1048576
+ executes = []
+ expected_commands = [('shred', '-n3', '-s1048576', '/dev/va')]
+ lvm.clear_volume('/dev/va')
+ self.assertEqual(expected_commands, executes)
+
+ CONF.set_override('volume_clear', 'shred', 'libvirt')
+ CONF.set_override('volume_clear_size', '1', 'libvirt')
+ lvm_size = 10485761
+ executes = []
+ expected_commands = [('shred', '-n3', '-s1048576', '/dev/vb')]
+ lvm.clear_volume('/dev/vb')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=none does nothing
+ CONF.set_override('volume_clear', 'none', 'libvirt')
+ executes = []
+ expected_commands = []
+ lvm.clear_volume('/dev/vc')
+ self.assertEqual(expected_commands, executes)
+
+ # Test volume_clear=invalid falls back to the default 'zero'
+ CONF.set_override('volume_clear', 'invalid', 'libvirt')
+ lvm_size = 1
+ executes = []
+ expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/vd',
+ 'seek=0', 'count=1', 'conv=fdatasync')]
+ lvm.clear_volume('/dev/vd')
+ self.assertEqual(expected_commands, executes)
+
+ def test_fail_remove_all_logical_volumes(self):
+ def fake_execute(*args, **kwargs):
+ if 'vol2' in args:
+ raise processutils.ProcessExecutionError('Error')
+
+ with contextlib.nested(
+ mock.patch.object(lvm, 'clear_volume'),
+ mock.patch.object(libvirt_utils, 'execute',
+ side_effect=fake_execute)) as (mock_clear, mock_execute):
+ self.assertRaises(exception.VolumesNotRemoved,
+ lvm.remove_volumes,
+ ['vol1', 'vol2', 'vol3'])
+ self.assertEqual(3, mock_execute.call_count)
diff --git a/nova/tests/unit/virt/libvirt/test_rbd.py b/nova/tests/unit/virt/libvirt/test_rbd.py
new file mode 100644
index 0000000000..bcbdc25f59
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_rbd.py
@@ -0,0 +1,283 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import test
+from nova import utils
+from nova.virt.libvirt import rbd_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+CEPH_MON_DUMP = """dumped monmap epoch 1
+{ "epoch": 1,
+ "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
+ "modified": "2013-05-22 17:44:56.343618",
+ "created": "2013-05-22 17:44:56.343618",
+ "mons": [
+ { "rank": 0,
+ "name": "a",
+ "addr": "[::1]:6789\/0"},
+ { "rank": 1,
+ "name": "b",
+ "addr": "[::1]:6790\/0"},
+ { "rank": 2,
+ "name": "c",
+ "addr": "[::1]:6791\/0"},
+ { "rank": 3,
+ "name": "d",
+ "addr": "127.0.0.1:6792\/0"},
+ { "rank": 4,
+ "name": "e",
+ "addr": "example.com:6791\/0"}],
+ "quorum": [
+ 0,
+ 1,
+ 2]}
+"""
+
+
+class RbdTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def setUp(self, mock_rados, mock_rbd):
+ super(RbdTestCase, self).setUp()
+
+ self.mock_rados = mock_rados
+ self.mock_rados.Rados = mock.Mock
+ self.mock_rados.Rados.ioctx = mock.Mock()
+ self.mock_rados.Rados.connect = mock.Mock()
+ self.mock_rados.Rados.shutdown = mock.Mock()
+ self.mock_rados.Rados.open_ioctx = mock.Mock()
+ self.mock_rados.Rados.open_ioctx.return_value = \
+ self.mock_rados.Rados.ioctx
+ self.mock_rados.Error = Exception
+
+ self.mock_rbd = mock_rbd
+ self.mock_rbd.RBD = mock.Mock
+ self.mock_rbd.Image = mock.Mock
+ self.mock_rbd.Image.close = mock.Mock()
+ self.mock_rbd.RBD.Error = Exception
+
+ self.rbd_pool = 'rbd'
+ self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None)
+
+ self.volume_name = u'volume-00000001'
+
+ def tearDown(self):
+ super(RbdTestCase, self).tearDown()
+
+ def test_good_locations(self):
+ locations = ['rbd://fsid/pool/image/snap',
+ 'rbd://%2F/%2F/%2F/%2F', ]
+ map(self.driver.parse_url, locations)
+
+ def test_bad_locations(self):
+ locations = ['rbd://image',
+ 'http://path/to/somewhere/else',
+ 'rbd://image/extra',
+ 'rbd://image/',
+ 'rbd://fsid/pool/image/',
+ 'rbd://fsid/pool/image/snap/',
+ 'rbd://///', ]
+ for loc in locations:
+ self.assertRaises(exception.ImageUnacceptable,
+ self.driver.parse_url, loc)
+ self.assertFalse(self.driver.is_cloneable({'url': loc},
+ {'disk_format': 'raw'}))
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ info = {'disk_format': 'raw'}
+ self.assertTrue(self.driver.is_cloneable(location, info))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_different_fsid(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://def/pool/image/snap'}
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy,
+ mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+
+ mock_proxy.side_effect = mock_rbd.Error
+
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ mock_proxy.assert_called_once_with(self.driver, 'image', pool='pool',
+ snapshot='snap', read_only=True)
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_bad_format(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ formats = ['qcow2', 'vmdk', 'vdi']
+ for f in formats:
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': f}))
+ self.assertTrue(mock_get_fsid.called)
+
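+ # get_mon_addrs parses the 'ceph mon dump' output above; IPv6 addresses are
+ # expected with their surrounding brackets stripped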
+ @mock.patch.object(utils, 'execute')
+ def test_get_mon_addrs(self, mock_execute):
+ mock_execute.return_value = (CEPH_MON_DUMP, '')
+ hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
+ ports = ['6789', '6790', '6791', '6792', '6791']
+ self.assertEqual((hosts, ports), self.driver.get_mon_addrs())
+
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_clone(self, mock_rados, mock_rbd, mock_client):
+ pool = u'images'
+ image = u'image-name'
+ snap = u'snapshot-name'
+ location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)}
+
+ client_stack = []
+
+ def mock__enter__(inst):
+ def _inner():
+ client_stack.append(inst)
+ return inst
+ return _inner
+
+ client = mock_client.return_value
+ # capture both rados clients (source and destination) used to perform the clone
+ client.__enter__.side_effect = mock__enter__(client)
+
+ rbd = mock_rbd.RBD.return_value
+
+ self.driver.clone(location, self.volume_name)
+
+ args = [client_stack[0].ioctx, str(image), str(snap),
+ client_stack[1].ioctx, str(self.volume_name)]
+ kwargs = {'features': mock_rbd.RBD_FEATURE_LAYERING}
+ rbd.clone.assert_called_once_with(*args, **kwargs)
+ self.assertEqual(client.__enter__.call_count, 2)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_resize(self, mock_proxy):
+ size = 1024
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.resize(self.volume_name, size)
+ proxy.resize.assert_called_once_with(size)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
+ @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd,
+ mock_connect_from_rados,
+ mock_disconnect_from_rados):
+ mock_connect_from_rados.return_value = (None, None)
+ mock_disconnect_from_rados.return_value = (None, None)
+
+ with rbd_utils.RBDVolumeProxy(self.driver, self.volume_name):
+ mock_connect_from_rados.assert_called_once_with(None)
+ self.assertFalse(mock_disconnect_from_rados.called)
+
+ mock_disconnect_from_rados.assert_called_once_with(None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_default(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados()
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados('alt_pool')
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool')
+
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_error(self, mock_rados):
+ mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error
+ self.assertRaises(mock_rados.Error, self.driver._connect_to_rados)
+ mock_rados.Rados.open_ioctx.assert_called_once_with(self.rbd_pool)
+ mock_rados.Rados.shutdown.assert_called_once_with()
+
+ def test_ceph_args_none(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = None
+ self.assertEqual([], self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = None
+ self.assertEqual(['--id', 'foo'], self.driver.ceph_args())
+
+ def test_ceph_args_ceph_conf(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user_and_ceph_conf(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_exists(self, mock_proxy):
+ snapshot = 'snap'
+ proxy = mock_proxy.return_value
+ self.assertTrue(self.driver.exists(self.volume_name,
+ self.rbd_pool,
+ snapshot))
+ proxy.__enter__.assert_called_once_with()
+ proxy.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
+ instance = {'uuid': '12345'}
+
+ rbd = mock_rbd.RBD.return_value
+ rbd.list.return_value = ['12345_test', '111_test']
+
+ client = mock_client.return_value
+ self.driver.cleanup_volumes(instance)
+ rbd.remove.assert_called_once_with(client.ioctx, '12345_test')
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
new file mode 100644
index 0000000000..4114c03516
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -0,0 +1,652 @@
+# Copyright 2012 NTT Data. All Rights Reserved.
+# Copyright 2012 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import os
+import tempfile
+
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.openstack.common import fileutils
+from nova import test
+from nova import utils
+from nova.virt.disk import api as disk
+from nova.virt import images
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+
+
+class LibvirtUtilsTestCase(test.NoDBTestCase):
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_get_disk_type(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ mock_execute.return_value = (example_output, '')
+ disk_type = libvirt_utils.get_disk_type(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('raw', disk_type)
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_local_cp(self, mock_execute):
+ libvirt_utils.copy_image('src', 'dest')
+ mock_execute.assert_called_once_with('cp', 'src', 'dest')
+
+ _rsync_call = functools.partial(mock.call,
+ 'rsync', '--sparse', '--compress')
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_rsync(self, mock_execute):
+ libvirt_utils.copy_image('src', 'dest', host='host')
+
+ mock_execute.assert_has_calls([
+ self._rsync_call('--dry-run', 'src', 'host:dest'),
+ self._rsync_call('src', 'host:dest'),
+ ])
+ self.assertEqual(2, mock_execute.call_count)
+
+ @mock.patch('nova.utils.execute')
+ def test_copy_image_scp(self, mock_execute):
+ mock_execute.side_effect = [
+ processutils.ProcessExecutionError,
+ mock.DEFAULT,
+ ]
+
+ libvirt_utils.copy_image('src', 'dest', host='host')
+
+ mock_execute.assert_has_calls([
+ self._rsync_call('--dry-run', 'src', 'host:dest'),
+ mock.call('scp', 'src', 'host:dest'),
+ ])
+ self.assertEqual(2, mock_execute.call_count)
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_disk_type(self, mock_exists):
+ # Paths under /dev are reported as 'lvm' volumes
+ for p in ['/dev/b', '/dev/blah/blah']:
+ d_type = libvirt_utils.get_disk_type(p)
+ self.assertEqual('lvm', d_type)
+
+ # Try rbd detection
+ d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
+ self.assertEqual('rbd', d_type)
+
+ # Try the other types
+ template_output = """image: %(path)s
+file format: %(format)s
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ path = '/myhome/disk.config'
+ for f in ['raw', 'qcow2']:
+ output = template_output % ({
+ 'format': f,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ d_type = libvirt_utils.get_disk_type(path)
+ mock_execute.assert_called_once_with(
+ 'env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ self.assertEqual(f, d_type)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_disk_backing(self, mock_execute, mock_exists):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: 2K (2048 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ output = template_output % ({
+ 'path': path,
+ })
+ mock_execute.return_value = (output, '')
+ d_backing = libvirt_utils.get_disk_backing_file(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertIsNone(d_backing)
+
+ def _test_disk_size(self, mock_execute, path, expected_size):
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEqual(expected_size, d_size)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_disk_size(self, mock_exists):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: %(v_size)s (%(vsize_b)s bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ for i in range(0, 128):
+ bytes = i * 65336
+ kbytes = bytes / 1024
+ mbytes = kbytes / 1024
+ output = template_output % ({
+ 'v_size': "%sM" % (mbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ self._test_disk_size(mock_execute, path, i)
+ output = template_output % ({
+ 'v_size': "%sK" % (kbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ with mock.patch('nova.utils.execute',
+ return_value=(output, '')) as mock_execute:
+ self._test_disk_size(mock_execute, path, i)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_canon(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(65536, image_info.cluster_size)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_canon2(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: QCOW2
+virtual size: 67108844
+cluster_size: 65536
+disk size: 963434
+backing file: /var/lib/nova/a328c7998805951a_2
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('qcow2', image_info.file_format)
+ self.assertEqual(67108844, image_info.virtual_size)
+ self.assertEqual(963434, image_info.disk_size)
+ self.assertEqual(65536, image_info.cluster_size)
+ self.assertEqual('/var/lib/nova/a328c7998805951a_2',
+ image_info.backing_file)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_backing_file_actual(self,
+ mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(1, len(image_info.snapshots))
+ self.assertEqual('/b/3a988059e51a_2',
+ image_info.backing_file)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_convert(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+junk stuff: bbb
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_qemu_info_snaps(self, mock_execute, mock_exists):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+disk size: 96K
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
+"""
+ mock_execute.return_value = (example_output, '')
+ image_info = images.qemu_img_info(path)
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+ self.assertEqual('disk.config', image_info.image)
+ self.assertEqual('raw', image_info.file_format)
+ self.assertEqual(67108864, image_info.virtual_size)
+ self.assertEqual(98304, image_info.disk_size)
+ self.assertEqual(3, len(image_info.snapshots))
+
+ def test_valid_hostname_normal(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
+
+ def test_valid_hostname_ipv4addr(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
+
+ def test_valid_hostname_ipv6addr(self):
+ self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
+
+ def test_valid_hostname_bad(self):
+ self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
+
+ @mock.patch('nova.utils.execute')
+ def test_create_image(self, mock_execute):
+ libvirt_utils.create_image('raw', '/some/path', '10G')
+ libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
+ expected_args = [(('qemu-img', 'create', '-f', 'raw',
+ '/some/path', '10G'),),
+ (('qemu-img', 'create', '-f', 'qcow2',
+ '/some/stuff', '1234567891234'),)]
+ self.assertEqual(expected_args, mock_execute.call_args_list)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_create_cow_image(self, mock_execute, mock_exists):
+ mock_execute.return_value = ('stdout', None)
+ libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
+ expected_args = [(('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', '/some/path'),),
+ (('qemu-img', 'create', '-f', 'qcow2',
+ '-o', 'backing_file=/some/path',
+ '/the/new/cow'),)]
+ self.assertEqual(expected_args, mock_execute.call_args_list)
+
+ def test_pick_disk_driver_name(self):
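+ # type_map maps virt_type to (is_block_dev, expected_driver_name) checks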
+ type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
+ 'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
+ 'xen': ([True, 'phy'], [False, 'tap2'], [None, 'tap2']),
+ 'uml': ([True, None], [False, None], [None, None]),
+ 'lxc': ([True, None], [False, None], [None, None])}
+
+ for (virt_type, checks) in type_map.iteritems():
+ if virt_type == "xen":
+ version = 4001000
+ else:
+ version = 1005001
+
+ self.flags(virt_type=virt_type, group='libvirt')
+ for (is_block_dev, expected_result) in checks:
+ result = libvirt_utils.pick_disk_driver_name(version,
+ is_block_dev)
+ self.assertEqual(result, expected_result)
+
+ def test_pick_disk_driver_name_xen_4_0_0(self):
+ self.flags(virt_type="xen", group='libvirt')
+ result = libvirt_utils.pick_disk_driver_name(4000000, False)
+ self.assertEqual(result, "tap")
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.execute')
+ def test_get_disk_size(self, mock_execute, mock_exists):
+ path = '/some/path'
+ example_output = """image: 00000001
+file format: raw
+virtual size: 4.4M (4592640 bytes)
+disk size: 4.4M
+"""
+ mock_execute.return_value = (example_output, '')
+ self.assertEqual(4592640, disk.get_disk_size('/some/path'))
+ mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ mock_exists.assert_called_once_with(path)
+
+ def test_copy_image(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ src_fd, src_path = tempfile.mkstemp()
+ try:
+ with os.fdopen(src_fd, 'w') as fp:
+ fp.write('canary')
+
+ libvirt_utils.copy_image(src_path, dst_path)
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'canary')
+ finally:
+ os.unlink(src_path)
+ finally:
+ os.unlink(dst_path)
+
+ def test_write_to_file(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_write_to_file_with_umask(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+ os.unlink(dst_path)
+
+ libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277)
+ with open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ mode = os.stat(dst_path).st_mode
+ self.assertEqual(mode & 0o277, 0)
+ finally:
+ os.unlink(dst_path)
+
+ @mock.patch.object(utils, 'execute')
+ def test_chown(self, mock_execute):
+ libvirt_utils.chown('/some/path', 'soren')
+ mock_execute.assert_called_once_with('chown', 'soren', '/some/path',
+ run_as_root=True)
+
+ @mock.patch.object(utils, 'execute')
+ def test_chown_for_id_maps(self, mock_execute):
+ id_maps = [vconfig.LibvirtConfigGuestUIDMap(),
+ vconfig.LibvirtConfigGuestUIDMap(),
+ vconfig.LibvirtConfigGuestGIDMap(),
+ vconfig.LibvirtConfigGuestGIDMap()]
+ id_maps[0].target = 10000
+ id_maps[0].count = 2000
+ id_maps[1].start = 2000
+ id_maps[1].target = 40000
+ id_maps[1].count = 2000
+ id_maps[2].target = 10000
+ id_maps[2].count = 2000
+ id_maps[3].start = 2000
+ id_maps[3].target = 40000
+ id_maps[3].count = 2000
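+ # Each map is rendered as start:target:count; start defaults to 0 when unset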
+ libvirt_utils.chown_for_id_maps('/some/path', id_maps)
+ execute_args = ('nova-idmapshift', '-i',
+ '-u', '0:10000:2000,2000:40000:2000',
+ '-g', '0:10000:2000,2000:40000:2000',
+ '/some/path')
+ mock_execute.assert_called_once_with(*execute_args, run_as_root=True)
+
+ def _do_test_extract_snapshot(self, mock_execute,
+ dest_format='raw', out_format='raw'):
+ libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
+ '/extracted/snap', dest_format)
+ mock_execute.assert_called_once_with(
+ 'qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
+ '/path/to/disk/image', '/extracted/snap')
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_raw(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute)
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_iso(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute, dest_format='iso')
+
+ @mock.patch.object(utils, 'execute')
+ def test_extract_snapshot_qcow2(self, mock_execute):
+ self._do_test_extract_snapshot(mock_execute,
+ dest_format='qcow2', out_format='qcow2')
+
+ def test_load_file(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ # We have a test for write_to_file. If that is sound, this suffices
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_file_open(self):
+ dst_fd, dst_path = tempfile.mkstemp()
+ try:
+ os.close(dst_fd)
+
+ # We have a test for write_to_file. If that is sound, this suffices
+ libvirt_utils.write_to_file(dst_path, 'hello')
+ with libvirt_utils.file_open(dst_path, 'r') as fp:
+ self.assertEqual(fp.read(), 'hello')
+ finally:
+ os.unlink(dst_path)
+
+ def test_get_fs_info(self):
+
+ class FakeStatResult(object):
+
+ def __init__(self):
+ self.f_bsize = 4096
+ self.f_frsize = 4096
+ self.f_blocks = 2000
+ self.f_bfree = 1000
+ self.f_bavail = 900
+ self.f_files = 2000
+ self.f_ffree = 1000
+ self.f_favail = 900
+ self.f_flag = 4096
+ self.f_namemax = 255
+
+ self.path = None
+
+ def fake_statvfs(path):
+ self.path = path
+ return FakeStatResult()
+
+ self.stubs.Set(os, 'statvfs', fake_statvfs)
+
+ fs_info = libvirt_utils.get_fs_info('/some/file/path')
+ self.assertEqual('/some/file/path', self.path)
+ self.assertEqual(8192000, fs_info['total'])
+ self.assertEqual(3686400, fs_info['free'])
+ self.assertEqual(4096000, fs_info['used'])
+
+ @mock.patch('nova.virt.images.fetch_to_raw')
+ def test_fetch_image(self, mock_images):
+ context = 'opaque context'
+ target = '/tmp/targetfile'
+ image_id = '4'
+ user_id = 'fake'
+ project_id = 'fake'
+ libvirt_utils.fetch_image(context, target, image_id,
+ user_id, project_id)
+ mock_images.assert_called_once_with(
+ context, image_id, target, user_id, project_id,
+ max_size=0)
+
+ def test_fetch_raw_image(self):
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ def fake_rename(old, new):
+ self.executes.append(('mv', old, new))
+
+ def fake_unlink(path):
+ self.executes.append(('rm', path))
+
+ def fake_rm_on_error(path, remove=None):
+ self.executes.append(('rm', '-f', path))
+
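+ # The fake below derives image properties from the file name: the suffix
+ # selects the format ('.part' strips to the real suffix, '.converted' means
+ # raw), 'backing' in the name simulates a backing file and 'big' simulates
+ # an image larger than the max_size limit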
+ def fake_qemu_img_info(path):
+ class FakeImgInfo(object):
+ pass
+
+ file_format = path.split('.')[-1]
+ if file_format == 'part':
+ file_format = path.split('.')[-2]
+ elif file_format == 'converted':
+ file_format = 'raw'
+
+ if 'backing' in path:
+ backing_file = 'backing'
+ else:
+ backing_file = None
+
+ if 'big' in path:
+ virtual_size = 2
+ else:
+ virtual_size = 1
+
+ FakeImgInfo.file_format = file_format
+ FakeImgInfo.backing_file = backing_file
+ FakeImgInfo.virtual_size = virtual_size
+
+ return FakeImgInfo()
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os, 'rename', fake_rename)
+ self.stubs.Set(os, 'unlink', fake_unlink)
+ self.stubs.Set(images, 'fetch', lambda *_, **__: None)
+ self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
+ self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)
+
+ # Since the remove param of fileutils.remove_path_on_error()
+ # is initialized at load time, we must provide a wrapper
+ # that explicitly resets it to our fake delete_if_exists()
+ old_rm_path_on_error = fileutils.remove_path_on_error
+ f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
+ self.stubs.Set(fileutils, 'remove_path_on_error', f)
+
+ context = 'opaque context'
+ image_id = '4'
+ user_id = 'fake'
+ project_id = 'fake'
+
+ target = 't.qcow2'
+ self.executes = []
+ expected_commands = [('qemu-img', 'convert', '-O', 'raw',
+ 't.qcow2.part', 't.qcow2.converted'),
+ ('rm', 't.qcow2.part'),
+ ('mv', 't.qcow2.converted', 't.qcow2')]
+ images.fetch_to_raw(context, image_id, target, user_id, project_id,
+ max_size=1)
+ self.assertEqual(self.executes, expected_commands)
+
+ target = 't.raw'
+ self.executes = []
+ expected_commands = [('mv', 't.raw.part', 't.raw')]
+ images.fetch_to_raw(context, image_id, target, user_id, project_id)
+ self.assertEqual(self.executes, expected_commands)
+
+ target = 'backing.qcow2'
+ self.executes = []
+ expected_commands = [('rm', '-f', 'backing.qcow2.part')]
+ self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw,
+ context, image_id, target, user_id, project_id)
+ self.assertEqual(self.executes, expected_commands)
+
+ target = 'big.qcow2'
+ self.executes = []
+ expected_commands = [('rm', '-f', 'big.qcow2.part')]
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ images.fetch_to_raw,
+ context, image_id, target, user_id, project_id,
+ max_size=1)
+ self.assertEqual(self.executes, expected_commands)
+
+ del self.executes
+
+ def test_get_disk_backing_file(self):
+ with_actual_path = False
+
+ def fake_execute(*args, **kwargs):
+ if with_actual_path:
+ return ("some: output\n"
+ "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
+ "...: ...\n"), ''
+ else:
+ return ("some: output\n"
+ "backing file: /foo/bar/baz\n"
+ "...: ...\n"), ''
+
+ def return_true(*args, **kwargs):
+ return True
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, 'exists', return_true)
+
+ out = libvirt_utils.get_disk_backing_file('')
+ self.assertEqual(out, 'baz')
+ with_actual_path = True
+ out = libvirt_utils.get_disk_backing_file('')
+ self.assertEqual(out, 'c')
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
new file mode 100644
index 0000000000..3d64dd5ad0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -0,0 +1,959 @@
+# Copyright 2012 Nicira, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+from lxml import etree
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.network import linux_net
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import vif
+
+CONF = cfg.CONF
+
+
+class LibvirtVifTestCase(test.NoDBTestCase):
+
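+ # Canned network_model fixtures covering the VIF types exercised by the tests below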
+ gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
+ dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
+ ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
+ subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
+ dns=[dns_bridge_4],
+ gateway=gateway_bridge_4,
+ routes=None,
+ dhcp_server='191.168.1.1')
+
+ gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
+ subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
+ dns=None,
+ gateway=gateway_bridge_6,
+ ips=None,
+ routes=None)
+
+ network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge='br0',
+ label=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ bridge_interface='eth0',
+ vlan=99)
+
+ vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=network_model.VIF_TYPE_BRIDGE,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid=None)
+
+ network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge=None,
+ label=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ bridge_interface='eth0',
+ vlan=99)
+
+ vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge_neutron,
+ type=None,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge='br0',
+ label=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ bridge_interface=None,
+ vlan=99)
+
+ network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge='br0',
+ label=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ bridge_interface=None,
+ vlan=99)
+
+ vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ovs,
+ type=network_model.VIF_TYPE_OVS,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ovs,
+ type=network_model.VIF_TYPE_OVS,
+ details={'ovs_hybrid_plug': True,
+ 'port_filter': True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ovs,
+ type=network_model.VIF_TYPE_OVS,
+ details={'port_filter': True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ovs,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None)
+
+ vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ivs,
+ type=network_model.VIF_TYPE_IVS,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ovs,
+ type=None,
+ devname=None,
+ ovs_interfaceid='aaa')
+
+ vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ivs,
+ type=network_model.VIF_TYPE_IVS,
+ details={'port_filter': True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_ivs,
+ type=network_model.VIF_TYPE_IVS,
+ details={
+ 'port_filter': True,
+ 'ovs_hybrid_plug': True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+
+ vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=None,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid=None)
+
+ network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge=None,
+ label=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ interface='eth0',
+ vlan=99)
+
+ vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_8021,
+ type=network_model.VIF_TYPE_802_QBH,
+ vnic_type=network_model.VNIC_TYPE_DIRECT,
+ ovs_interfaceid=None,
+ details={
+ network_model.VIF_DETAILS_PROFILEID:
+ 'MyPortProfile'},
+ profile={'pci_vendor_info': '1137:0043',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'})
+
+ vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_8021,
+ type=network_model.VIF_TYPE_HW_VEB,
+ vnic_type=network_model.VNIC_TYPE_DIRECT,
+ ovs_interfaceid=None,
+ details={
+ network_model.VIF_DETAILS_VLAN: '100'},
+ profile={'pci_vendor_info': '1137:0043',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'})
+
+ vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_8021,
+ type=network_model.VIF_TYPE_802_QBG,
+ ovs_interfaceid=None,
+ qbg_params=network_model.VIF8021QbgParams(
+ managerid="xxx-yyy-zzz",
+ typeid="aaa-bbb-ccc",
+ typeidversion="1",
+ instanceid="ddd-eee-fff"))
+
+ network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
+ label=None,
+ bridge=None,
+ subnets=[subnet_bridge_4,
+ subnet_bridge_6],
+ interface='eth0')
+
+ network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
+ label=None,
+ bridge=None,
+ subnets=[subnet_bridge_4],
+ interface='eth0')
+
+ vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_mlnx,
+ type=network_model.VIF_TYPE_MLNX_DIRECT,
+ devname='tap-xxx-yyy-zzz')
+
+ vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_mlnx,
+ type=network_model.VIF_TYPE_MLNX_DIRECT,
+ details={'physical_network':
+ 'fake_phy_network'},
+ devname='tap-xxx-yyy-zzz')
+
+ vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_midonet,
+ type=network_model.VIF_TYPE_MIDONET,
+ devname='tap-xxx-yyy-zzz')
+
+ vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=network_model.VIF_TYPE_IOVISOR,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid=None)
+
+ instance = {
+ 'name': 'instance-name',
+ 'uuid': 'instance-uuid'
+ }
+
+ bandwidth = {
+ 'quota:vif_inbound_peak': '200',
+ 'quota:vif_outbound_peak': '20',
+ 'quota:vif_inbound_average': '100',
+ 'quota:vif_outbound_average': '10',
+ 'quota:vif_inbound_burst': '300',
+ 'quota:vif_outbound_burst': '30'
+ }
+
+ def setUp(self):
+ super(LibvirtVifTestCase, self).setUp()
+ self.flags(allow_same_net_traffic=True)
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def _get_conn(self, uri="qemu:///session", ver=None):
+ def __inner():
+ if ver is None:
+ return fakelibvirt.Connection(uri, False)
+ else:
+ return fakelibvirt.Connection(uri, False, ver)
+ return __inner
+
+ def _get_node(self, xml):
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ return ret[0]
+
+ def _assertMacEquals(self, node, vif):
+ mac = node.find("mac").get("address")
+ self.assertEqual(mac, vif['address'])
+
+ def _assertTypeEquals(self, node, type, attr, source, br_want,
+ prefix=None):
+ self.assertEqual(node.get("type"), type)
+ br_name = node.find(attr).get(source)
+ if prefix is None:
+ self.assertEqual(br_name, br_want)
+ else:
+ self.assertTrue(br_name.startswith(prefix))
+
+ def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
+ br_want=None, size=0, prefix=None):
+ ret = node.findall("filterref")
+ self.assertEqual(len(ret), size)
+ self._assertTypeEquals(node, type, attr, source, br_want,
+ prefix)
+ self._assertMacEquals(node, vif)
+
+ def _assertModel(self, xml, model_want=None, driver_want=None):
+ node = self._get_node(xml)
+ if model_want is None:
+ ret = node.findall("model")
+ self.assertEqual(len(ret), 0)
+ else:
+ model = node.find("model").get("type")
+ self.assertEqual(model, model_want)
+ if driver_want is None:
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+ else:
+ driver = node.find("driver").get("name")
+ self.assertEqual(driver, driver_want)
+
+ def _assertTypeAndPciEquals(self, node, type, vif):
+ self.assertEqual(node.get("type"), type)
+ address = node.find("source").find("address")
+ addr_type = address.get("type")
+ self.assertEqual("pci", addr_type)
+ pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
+ 'domain': address.get("domain")[2:],
+ 'bus': address.get("bus")[2:],
+ 'slot': address.get("slot")[2:],
+ 'func': address.get("function")[2:]}
+
+ pci_slot_want = vif['profile']['pci_slot']
+ self.assertEqual(pci_slot, pci_slot_want)
+
+ def _get_conf(self):
+ conf = vconfig.LibvirtConfigGuest()
+ conf.virt_type = "qemu"
+ conf.name = "fake-name"
+ conf.uuid = "fake-uuid"
+ conf.memory = 100 * 1024
+ conf.vcpus = 4
+ return conf
+
+ def _get_instance_xml(self, driver, vif, image_meta=None):
+ default_inst_type = {
+ 'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+ 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+ 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1,
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+ 'flavorid': '1', 'vcpu_weight': None, 'id': 2,
+ 'extra_specs': dict(self.bandwidth)
+ }
+ conf = self._get_conf()
+ nic = driver.get_config(self.instance, vif, image_meta,
+ default_inst_type, CONF.libvirt.virt_type)
+ conf.add_device(nic)
+ return conf.to_xml()
+
+ def test_multiple_nics(self):
+ conf = self._get_conf()
+ # Tests multiple nic configuration and that target_dev is
+ # set for each
+ nics = [{'net_type': 'bridge',
+ 'mac_addr': '00:00:00:00:00:0b',
+ 'source_dev': 'b_source_dev',
+ 'target_dev': 'b_target_dev'},
+ {'net_type': 'ethernet',
+ 'mac_addr': '00:00:00:00:00:0e',
+ 'source_dev': 'e_source_dev',
+ 'target_dev': 'e_target_dev'},
+ {'net_type': 'direct',
+ 'mac_addr': '00:00:00:00:00:0d',
+ 'source_dev': 'd_source_dev',
+ 'target_dev': 'd_target_dev'}]
+
+ for nic in nics:
+ nic_conf = vconfig.LibvirtConfigGuestInterface()
+ nic_conf.net_type = nic['net_type']
+ nic_conf.target_dev = nic['target_dev']
+ nic_conf.mac_addr = nic['mac_addr']
+ nic_conf.source_dev = nic['source_dev']
+ conf.add_device(nic_conf)
+
+ xml = conf.to_xml()
+ doc = etree.fromstring(xml)
+ for nic in nics:
+ path = "./devices/interface/[@type='%s']" % nic['net_type']
+ node = doc.find(path)
+ self.assertEqual(nic['net_type'], node.get("type"))
+ self.assertEqual(nic['mac_addr'],
+ node.find("mac").get("address"))
+ self.assertEqual(nic['target_dev'],
+ node.find("target").get("dev"))
+
+ def test_model_novirtio(self):
+ self.flags(use_virtio_for_bridges=False,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml)
+
+ def test_model_kvm(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+ def test_model_kvm_qemu_custom(self):
+ for virt in ('kvm', 'qemu'):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type=virt,
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ supported = (network_model.VIF_MODEL_NE2K_PCI,
+ network_model.VIF_MODEL_PCNET,
+ network_model.VIF_MODEL_RTL8139,
+ network_model.VIF_MODEL_E1000,
+ network_model.VIF_MODEL_SPAPR_VLAN)
+ for model in supported:
+ image_meta = {'properties': {'hw_vif_model': model}}
+ xml = self._get_instance_xml(d, self.vif_bridge,
+ image_meta)
+ self._assertModel(xml, model)
+
+ def test_model_kvm_bogus(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='kvm',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ image_meta = {'properties': {'hw_vif_model': 'acme'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ self._get_instance_xml,
+ d,
+ self.vif_bridge,
+ image_meta)
+
+ def _test_model_qemu(self, *vif_objs, **kw):
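+        # Each generated interface must carry the inbound/outbound
+        # <bandwidth> settings from the flavor extra specs and use the
+        # virtio model with the qemu driver.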
+ libvirt_version = kw.get('libvirt_version')
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='qemu',
+ group='libvirt')
+
+ for vif_obj in vif_objs:
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ if libvirt_version is not None:
+ d.libvirt_version = libvirt_version
+
+ xml = self._get_instance_xml(d, vif_obj)
+
+ doc = etree.fromstring(xml)
+
+ bandwidth = doc.find('./devices/interface/bandwidth')
+ self.assertNotEqual(bandwidth, None)
+
+ inbound = bandwidth.find('inbound')
+ self.assertEqual(inbound.get("average"),
+ self.bandwidth['quota:vif_inbound_average'])
+ self.assertEqual(inbound.get("peak"),
+ self.bandwidth['quota:vif_inbound_peak'])
+ self.assertEqual(inbound.get("burst"),
+ self.bandwidth['quota:vif_inbound_burst'])
+
+ outbound = bandwidth.find('outbound')
+ self.assertEqual(outbound.get("average"),
+ self.bandwidth['quota:vif_outbound_average'])
+ self.assertEqual(outbound.get("peak"),
+ self.bandwidth['quota:vif_outbound_peak'])
+ self.assertEqual(outbound.get("burst"),
+ self.bandwidth['quota:vif_outbound_burst'])
+
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
+
+ def test_model_qemu_no_firewall(self):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ self._test_model_qemu(
+ self.vif_bridge,
+ self.vif_8021qbg,
+ self.vif_iovisor,
+ self.vif_mlnx,
+ self.vif_ovs,
+ )
+
+ def test_model_qemu_iptables(self):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ self._test_model_qemu(
+ self.vif_bridge,
+ self.vif_ovs,
+ self.vif_ivs,
+ self.vif_8021qbg,
+ self.vif_iovisor,
+ self.vif_mlnx,
+ )
+
+ def test_model_xen(self):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='xen',
+ group='libvirt')
+
+ d = vif.LibvirtGenericVIFDriver(self._get_conn("xen:///system"))
+ xml = self._get_instance_xml(d, self.vif_bridge)
+ self._assertModel(xml)
+
+ def test_generic_driver_none(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.assertRaises(exception.NovaException,
+ self._get_instance_xml,
+ d,
+ self.vif_none)
+
+ def _check_bridge_driver(self, d, vif, br_want):
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_bridge, br_want, 1)
+
+ def test_generic_driver_bridge(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self._check_bridge_driver(d,
+ self.vif_bridge,
+ self.vif_bridge['network']['bridge'])
+
+ def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs, prefix=dev_prefix)
+ script = node.find("script").get("path")
+ self.assertEqual(script, "")
+
+ def test_unplug_ivs_ethernet(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
+ delete.side_effect = processutils.ProcessExecutionError
+ d.unplug_ivs_ethernet(None, self.vif_ovs)
+
+ def test_plug_ovs_hybrid(self):
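+        # Plugging an OVS hybrid vif is expected to create the qbr linux
+        # bridge, a qvb/qvo veth pair and the OVS port, using the exact
+        # external commands listed below.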
+ calls = {
+ 'device_exists': [mock.call('qbrvif-xxx-yyy'),
+ mock.call('qvovif-xxx-yyy')],
+ '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
+ 'qvovif-xxx-yyy')],
+ 'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
+ run_as_root=True),
+ mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
+ run_as_root=True),
+ mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
+ run_as_root=True),
+ mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
+ '/bridge/multicast_snooping'),
+ process_input='0', run_as_root=True,
+ check_exit_code=[0, 1]),
+ mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+ run_as_root=True),
+ mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True)],
+ 'create_ovs_vif_port': [mock.call('br0',
+ 'qvovif-xxx-yyy', 'aaa-bbb-ccc',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid')]
+ }
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=False),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, '_create_veth_pair'),
+ mock.patch.object(linux_net, 'create_ovs_vif_port')
+ ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d.plug_ovs_hybrid(self.instance, self.vif_ovs)
+ device_exists.assert_has_calls(calls['device_exists'])
+ _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
+ execute.assert_has_calls(calls['execute'])
+ create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+
+ def test_unplug_ovs_hybrid(self):
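+        # Unplugging tears the hybrid setup down again: detach the veth
+        # from the qbr bridge, delete the bridge and remove the OVS port.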
+ calls = {
+ 'device_exists': [mock.call('qbrvif-xxx-yyy')],
+ 'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True),
+ mock.call('ip', 'link', 'set',
+ 'qbrvif-xxx-yyy', 'down', run_as_root=True),
+ mock.call('brctl', 'delbr',
+ 'qbrvif-xxx-yyy', run_as_root=True)],
+ 'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
+ }
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=True),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, 'delete_ovs_vif_port')
+ ) as (device_exists, execute, delete_ovs_vif_port):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d.unplug_ovs_hybrid(None, self.vif_ovs)
+ device_exists.assert_has_calls(calls['device_exists'])
+ execute.assert_has_calls(calls['execute'])
+ delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
+ def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
+ calls = {
+ 'device_exists': [mock.call('qbrvif-xxx-yyy')],
+ 'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
+ }
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=False),
+ mock.patch.object(linux_net, 'delete_ovs_vif_port')
+ ) as (device_exists, delete_ovs_vif_port):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d.unplug_ovs_hybrid(None, self.vif_ovs)
+ device_exists.assert_has_calls(calls['device_exists'])
+ delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
+ def test_plug_ivs_hybrid(self):
+ calls = {
+ 'device_exists': [mock.call('qbrvif-xxx-yyy'),
+ mock.call('qvovif-xxx-yyy')],
+ '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
+ 'qvovif-xxx-yyy')],
+ 'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
+ run_as_root=True),
+ mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
+ run_as_root=True),
+ mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
+ run_as_root=True),
+ mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
+ '/bridge/multicast_snooping'),
+ process_input='0', run_as_root=True,
+ check_exit_code=[0, 1]),
+ mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+ run_as_root=True),
+ mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True)],
+ 'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid')]
+ }
+ with contextlib.nested(
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=False),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, '_create_veth_pair'),
+ mock.patch.object(linux_net, 'create_ivs_vif_port')
+ ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d.plug_ivs_hybrid(self.instance, self.vif_ivs)
+ device_exists.assert_has_calls(calls['device_exists'])
+ _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
+ execute.assert_has_calls(calls['execute'])
+ create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
+
+ def test_unplug_ivs_hybrid(self):
+ calls = {
+ 'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True),
+ mock.call('ip', 'link', 'set',
+ 'qbrvif-xxx-yyy', 'down', run_as_root=True),
+ mock.call('brctl', 'delbr',
+ 'qbrvif-xxx-yyy', run_as_root=True)],
+ 'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
+ }
+ with contextlib.nested(
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, 'delete_ivs_vif_port')
+ ) as (execute, delete_ivs_vif_port):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d.unplug_ivs_hybrid(None, self.vif_ivs)
+ execute.assert_has_calls(calls['execute'])
+ delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
+
+ def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_ivs_hybrid(None, self.vif_ivs)
+
+ def test_unplug_iovisor(self):
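+        # unplug_iovisor() must not propagate a ProcessExecutionError
+        # raised by the underlying utils.execute call.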
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
+ label='mylabel')
+ myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=mynetwork)
+ d.unplug_iovisor(None, myvif)
+
+ @mock.patch('nova.network.linux_net.device_exists')
+ def test_plug_iovisor(self, device_exists):
+ device_exists.return_value = True
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ instance = {
+ 'name': 'instance-name',
+ 'uuid': 'instance-uuid',
+ 'project_id': 'myproject'
+ }
+ d.plug_iovisor(instance, self.vif_ivs)
+
+ def test_unplug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_mlnx_direct(None, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'del-port',
+ 'fake_phy_network',
+ 'ca:fe:de:ad:be:ef',
+ run_as_root=True)
+
+ def test_plug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'add-port',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid',
+ 'fake_phy_network',
+ 'mlnx_direct',
+ 'eth-xxx-yyy-zzz',
+ run_as_root=True)
+
+ def test_plug_mlnx_no_physical_network(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ self.assertRaises(exception.NovaException,
+ d.plug_mlnx_direct,
+ self.instance,
+ self.vif_mlnx)
+ self.assertEqual(0, execute.call_count)
+
+ def test_ivs_ethernet_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ self._check_ivs_ethernet_driver(d,
+ self.vif_ivs,
+ "tap")
+
+ def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ vif, vif['devname'])
+
+ def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ vif, "br0")
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "openvswitch")
+ iface_id_found = False
+ for p_elem in vp.findall("parameters"):
+ iface_id = p_elem.get("interfaceid", None)
+ if iface_id:
+ self.assertEqual(iface_id, want_iface_id)
+ iface_id_found = True
+
+ self.assertTrue(iface_id_found)
+
+ def test_generic_ovs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ovs['ovs_interfaceid']
+ self._check_ovs_virtualport_driver(d,
+ self.vif_ovs,
+ want_iface_id)
+
+ def test_generic_ivs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ivs['ovs_interfaceid']
+ self._check_ivs_virtualport_driver(d,
+ self.vif_ivs,
+ want_iface_id)
+
+ def test_ivs_plug_with_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ xml = self._get_instance_xml(d, self.vif_ivs)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs, br_want, 1)
+
+    def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs_filter_hybrid, br_want, 0)
+
+    def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = self.vif_ivs_filter_direct['devname']
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs_filter_direct, br_want, 0)
+
+ def test_hybrid_plug_without_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ovs_hybrid, br_want, 0)
+
+ def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = self.vif_midonet['devname']
+ xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
+ self.vif_ovs_filter_cap, br_want)
+
+ def _check_neutron_hybrid_driver(self, d, vif, br_want):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ vif, br_want, 1)
+
+ def test_generic_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ovs,
+ br_want)
+
+ def test_ivs_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ivs,
+ br_want)
+
+ def test_mlnx_direct_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d,
+ self.vif_mlnx)
+ node = self._get_node(xml)
+ self.assertEqual(node.get("type"), "direct")
+ self._assertTypeEquals(node, "direct", "source",
+ "dev", "eth-xxx-yyy-zzz")
+ self._assertTypeEquals(node, "direct", "source",
+ "mode", "passthrough")
+ self._assertMacEquals(node, self.vif_mlnx)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+ def test_midonet_ethernet_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ br_want = self.vif_midonet['devname']
+ xml = self._get_instance_xml(d, self.vif_midonet)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_midonet, br_want)
+
+ def test_generic_8021qbh_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbh)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
+ self._assertMacEquals(node, self.vif_8021qbh)
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbh")
+ profile_id_found = False
+ for p_elem in vp.findall("parameters"):
+ details = self.vif_8021qbh["details"]
+ profile_id = p_elem.get("profileid", None)
+ if profile_id:
+ self.assertEqual(profile_id,
+ details[network_model.VIF_DETAILS_PROFILEID])
+ profile_id_found = True
+
+ self.assertTrue(profile_id_found)
+
+ def test_hw_veb_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_hw_veb)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
+ self._assertMacEquals(node, self.vif_hw_veb)
+ vlan = node.find("vlan").find("tag").get("id")
+ vlan_want = self.vif_hw_veb["details"]["vlan"]
+ self.assertEqual(vlan, vlan_want)
+
+ def test_generic_iovisor_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ br_want = self.vif_ivs['devname']
+ xml = self._get_instance_xml(d, self.vif_ivs)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs, br_want)
+
+ def test_generic_8021qbg_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbg)
+
+ node = self._get_node(xml)
+ self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
+ self._assertMacEquals(node, self.vif_8021qbg)
+
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbg")
+ manager_id_found = False
+ type_id_found = False
+ typeversion_id_found = False
+ instance_id_found = False
+ for p_elem in vp.findall("parameters"):
+ wantparams = self.vif_8021qbg['qbg_params']
+ manager_id = p_elem.get("managerid", None)
+ type_id = p_elem.get("typeid", None)
+ typeversion_id = p_elem.get("typeidversion", None)
+ instance_id = p_elem.get("instanceid", None)
+ if manager_id:
+ self.assertEqual(manager_id,
+ wantparams['managerid'])
+ manager_id_found = True
+ if type_id:
+ self.assertEqual(type_id,
+ wantparams['typeid'])
+ type_id_found = True
+ if typeversion_id:
+ self.assertEqual(typeversion_id,
+ wantparams['typeidversion'])
+ typeversion_id_found = True
+ if instance_id:
+ self.assertEqual(instance_id,
+ wantparams['instanceid'])
+ instance_id_found = True
+
+ self.assertTrue(manager_id_found)
+ self.assertTrue(type_id_found)
+ self.assertTrue(typeversion_id_found)
+ self.assertTrue(instance_id_found)
diff --git a/nova/tests/unit/virt/libvirt/test_volume.py b/nova/tests/unit/virt/libvirt/test_volume.py
new file mode 100644
index 0000000000..0594161638
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_volume.py
@@ -0,0 +1,1160 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import os
+import time
+
+import fixtures
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.storage import linuxscsi
+from nova import test
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova import utils
+from nova.virt import fake
+from nova.virt.libvirt import utils as libvirt_utils
+from nova.virt.libvirt import volume
+
+CONF = cfg.CONF
+
+
+class LibvirtVolumeTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtVolumeTestCase, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ class FakeLibvirtDriver(object):
+ def __init__(self, hyperv="QEMU", version=1005001):
+ self.hyperv = hyperv
+ self.version = version
+
+ def _get_hypervisor_version(self):
+ return self.version
+
+ def _get_hypervisor_type(self):
+ return self.hyperv
+
+ def _get_all_block_devices(self):
+ return []
+
+ self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
+ self.connr = {
+ 'ip': '127.0.0.1',
+ 'initiator': 'fake_initiator',
+ 'host': 'fake_host'
+ }
+ self.disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.name = 'volume-00000001'
+ self.location = '10.0.2.15:3260'
+ self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
+ self.vol = {'id': 1, 'name': self.name}
+ self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ self.user = 'foo'
+
+ def _assertNetworkAndProtocolEquals(self, tree):
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', self.name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+
+ def _assertFileTypeEquals(self, tree, file_path):
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), file_path)
+
+ def _assertDiskInfoEquals(self, tree, disk_info):
+ self.assertEqual(tree.get('device'), disk_info['type'])
+ self.assertEqual(tree.find('./target').get('bus'),
+ disk_info['bus'])
+ self.assertEqual(tree.find('./target').get('dev'),
+ disk_info['dev'])
+
+ def _test_libvirt_volume_driver_disk_info(self):
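+        # The generated <disk> element must reflect the device type, bus
+        # and device name from self.disk_info.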
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertDiskInfoEquals(tree, self.disk_info)
+
+ def test_libvirt_volume_disk_info_type(self):
+ self.disk_info['type'] = 'cdrom'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_dev(self):
+ self.disk_info['dev'] = 'hdc'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_bus(self):
+ self.disk_info['bus'] = 'scsi'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_driver_serial(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual('fake_serial', tree.find('./serial').text)
+ self.assertIsNone(tree.find('./blockio'))
+
+ def test_libvirt_volume_driver_blockio(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ 'logical_block_size': '4096',
+ 'physical_block_size': '4096',
+ },
+ 'serial': 'fake_serial',
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ blockio = tree.find('./blockio')
+ self.assertEqual('4096', blockio.get('logical_block_size'))
+ self.assertEqual('4096', blockio.get('physical_block_size'))
+
+ def test_libvirt_volume_driver_iotune(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'qos_specs': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ iotune = tree.find('./iotune')
+ # ensure invalid qos_specs is ignored
+ self.assertIsNone(iotune)
+
+ specs = {
+ 'total_bytes_sec': '102400',
+ 'read_bytes_sec': '51200',
+ 'write_bytes_sec': '0',
+ 'total_iops_sec': '0',
+ 'read_iops_sec': '200',
+ 'write_iops_sec': '200',
+ }
+ del connection_info['data']['qos_specs']
+ connection_info['data'].update(dict(qos_specs=specs))
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
+
+ def test_libvirt_volume_driver_readonly(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'access_mode': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.assertRaises(exception.InvalidVolumeAccessMode,
+ libvirt_driver.get_config,
+ connection_info, self.disk_info)
+
+ connection_info['data']['access_mode'] = 'rw'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNone(readonly)
+
+ connection_info['data']['access_mode'] = 'ro'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNotNone(readonly)
+
+ def iscsi_connection(self, volume, location, iqn):
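+        # Build an iSCSI connection_info dict; qos_specs are included so
+        # the iotune handling is exercised as well.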
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ dev_path = '/dev/disk/by-path/%s' % (dev_name)
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ 'device_path': dev_path,
+ 'qos_specs': {
+ 'total_bytes_sec': '102400',
+ 'read_iops_sec': '200',
+ }
+ }
+ }
+
+ def test_rescan_multipath(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._rescan_multipath()
+ expected_multipath_cmd = ('multipath', '-r')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_iscsiadm_discover_parsing(self):
+ # Ensure that parsing iscsiadm discover ignores cruft.
+
+ targets = [
+ ["192.168.204.82:3260,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd3")],
+ ["192.168.204.82:3261,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]]
+
+ # This slight wonkiness brought to you by pep8, as the actual
+ # example output runs about 97 chars wide.
+ sample_input = """Loading iscsi modules: done
+Starting iSCSI initiator service: done
+Setting up iSCSI targets: unused
+%s %s
+%s %s
+""" % (targets[0][0], targets[0][1], targets[1][0], targets[1][1])
+ driver = volume.LibvirtISCSIVolumeDriver("none")
+ out = driver._get_target_portals_from_iscsiadm_output(sample_input)
+ self.assertEqual(out, targets)
+
+ def test_libvirt_iscsi_driver(self):
+        # NOTE(vish): exists is stubbed so the driver assumes connecting
+        # worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location),
+ ('iscsiadm', '-m', 'session'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--rescan'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'manual'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--logout'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'delete')]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_still_in_use(self):
+        # NOTE(vish): exists is stubbed so the driver assumes connecting
+        # worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+ self.iqn)]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location, self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location),
+ ('iscsiadm', '-m', 'session'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--rescan'),
+ ('cp', '/dev/stdin',
+ '/sys/block/%s/device/delete' % dev_name)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+ self.iqn)]
+ with contextlib.nested(
+ mock.patch.object(os.path, 'exists', return_value=True),
+ mock.patch.object(self.fake_conn, '_get_all_block_devices',
+ return_value=devs),
+ mock.patch.object(libvirt_driver, '_rescan_multipath'),
+ mock.patch.object(libvirt_driver, '_run_multipath'),
+ mock.patch.object(libvirt_driver, '_get_multipath_device_name',
+ return_value='/dev/mapper/fake-multipath-devname'),
+ mock.patch.object(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ return_value=[('fake-ip', 'fake-portal')]),
+ mock.patch.object(libvirt_driver, '_get_multipath_iqn',
+ return_value='fake-portal'),
+ ) as (mock_exists, mock_devices, mock_rescan_multipath,
+ mock_run_multipath, mock_device_name, mock_get_portals,
+ mock_get_iqn):
+ mock_run_multipath.side_effect = processutils.ProcessExecutionError
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+
+ libvirt_driver.use_multipath = True
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ mock_run_multipath.assert_called_once_with(
+ ['-f', 'fake-multipath-devname'],
+ check_exit_code=[0, 1])
+
+ def test_libvirt_iscsi_driver_get_config(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+ dev_path = '/dev/disk/by-path/%s' % (dev_name)
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ libvirt_driver.use_multipath = True
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ def test_libvirt_iscsi_driver_multipath_id(self):
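+        # connect_volume() should store the multipath id in
+        # connection_info so that disconnect_volume() tears down the
+        # same multipath device afterwards.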
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver.use_multipath = True
+ self.stubs.Set(libvirt_driver, '_run_iscsiadm_bare',
+ lambda x, check_exit_code: ('',))
+ self.stubs.Set(libvirt_driver, '_rescan_iscsi', lambda: None)
+ self.stubs.Set(libvirt_driver, '_get_host_device', lambda x: None)
+ self.stubs.Set(libvirt_driver, '_rescan_multipath', lambda: None)
+ fake_multipath_id = 'fake_multipath_id'
+ fake_multipath_device = '/dev/mapper/%s' % fake_multipath_id
+ self.stubs.Set(libvirt_driver, '_get_multipath_device_name',
+ lambda x: fake_multipath_device)
+
+ def fake_disconnect_volume_multipath_iscsi(iscsi_properties,
+ multipath_device):
+ if fake_multipath_device != multipath_device:
+ raise Exception('Invalid multipath_device.')
+
+ self.stubs.Set(libvirt_driver, '_disconnect_volume_multipath_iscsi',
+ fake_disconnect_volume_multipath_iscsi)
+ with mock.patch.object(os.path, 'exists', return_value=True):
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info,
+ self.disk_info)
+ self.assertEqual(fake_multipath_id,
+ connection_info['data']['multipath_id'])
+ libvirt_driver.disconnect_volume(connection_info, "fake")
+
+ def test_sanitize_log_run_iscsiadm(self):
+ # Tests that the parameters to the _run_iscsiadm function are sanitized
+ # for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ self.assertIn('node.session.auth.password', args[0])
+ self.assertNotIn('scrubme', args[0])
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ iscsi_properties = connection_info['data']
+ with mock.patch.object(volume.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ libvirt_driver._iscsiadm_update(iscsi_properties,
+ 'node.session.auth.password',
+ 'scrubme')
+            # We don't care what the log message is; we just want to make
+            # sure our stub method is called, which asserts that the
+            # password is scrubbed.
+ self.assertTrue(debug_mock.called)
+
+ def iser_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iser',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
+ def test_libvirt_sheepdog_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.sheepdog_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
+ self.assertEqual(tree.find('./source').get('name'), self.name)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
+ 'auth_username': CONF.libvirt.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.libvirt.rbd_secret_uuid,
+ 'qos_specs': {
+ 'total_bytes_sec': '1048576',
+ 'read_iops_sec': '500',
+ }
+ }
+ }
+
+ def test_libvirt_rbd_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_hosts(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ hosts = ['example.com', '1.2.3.4', '::1']
+ ports = [None, '6790', '6791']
+ connection_info['data']['hosts'] = hosts
+ connection_info['data']['ports'] = ports
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ found_hosts = tree.findall('./source/host')
+ self.assertEqual([host.get('name') for host in found_hosts], hosts)
+ self.assertEqual([host.get('port') for host in found_hosts], ports)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), self.user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
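+        # The rbd_user/rbd_secret_uuid flags configured locally take
+        # precedence over the credentials passed in connection_info.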
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./auth'))
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ # NOTE: Supplying the rbd_secret_uuid will enable authentication
+ # locally in nova-compute even if not enabled in nova-volume/cinder
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_kvm_volume(self):
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath(self):
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[self.location, self.iqn]])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver._get_multipath_iqn = lambda x: self.iqn
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+ expected_multipath_cmd = ('multipath', '-f', 'foo')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_libvirt_kvm_volume_with_multipath_still_in_use(self):
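+        # Only the devices belonging to this volume's LUN may be
+        # deleted; the lun-2 device reported by _get_all_block_devices
+        # must stay untouched.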
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ mpdev_filepath = '/dev/mapper/foo'
+
+ def _get_multipath_device_name(path):
+ if '%s-lun-1' % iqn in path:
+ return mpdev_filepath
+ return '/dev/mapper/donotdisconnect'
+
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._get_multipath_device_name =\
+ lambda x: _get_multipath_device_name(x)
+
+ block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices',
+ lambda: block_devs)
+
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ connection_info['data']['device_path'] = mpdev_filepath
+
+ libvirt_driver._get_multipath_iqn = lambda x: iqn
+
+ iscsi_devs = ['1.2.3.4-iscsi-%s-lun-1' % iqn,
+ '%s-iscsi-%s-lun-1' % (location, iqn),
+ '%s-iscsi-%s-lun-2' % (location, iqn)]
+ libvirt_driver._get_iscsi_devices = lambda: iscsi_devs
+
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
+
+ # Set up disconnect volume mock expectations
+ self.mox.StubOutWithMock(libvirt_driver, '_delete_device')
+ self.mox.StubOutWithMock(libvirt_driver, '_rescan_multipath')
+ libvirt_driver._rescan_multipath()
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[0])
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[1])
+ libvirt_driver._rescan_multipath()
+
+ # Ensure that the mpath devices are deleted
+ self.mox.ReplayAll()
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
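+        # Even with block devices of other volumes present, the config
+        # must point at the multipath device resolved for this volume.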
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ name0 = 'volume-00000000'
+ iqn0 = 'iqn.2010-10.org.openstack:%s' % name0
+ dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (self.location, iqn0)
+ dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ devs = [dev0, dev]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_iser_volume_with_multipath(self):
+ self.flags(iser_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(time, 'sleep', lambda x: None)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iser_connection(vol, location, iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver._get_multipath_iqn = lambda x: iqn
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+ expected_multipath_cmd = ('multipath', '-f', 'foo')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
+ self.flags(iser_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(time, 'sleep', lambda x: None)
+ libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
+ name0 = 'volume-00000000'
+ location0 = '10.0.2.15:3260'
+ iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
+ dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ devs = [dev0, dev]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
+ connection_info = self.iser_connection(vol, location, iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_nfs_driver(self):
+        # is_mounted is stubbed to False below, so the driver performs
+        # the mount itself.
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ device_path = os.path.join(export_mnt_base,
+ connection_info['data']['name'])
+ self.assertEqual(device_path, connection_info['data']['device_path'])
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'nfs', export_string, export_mnt_base),
+ ('umount', export_mnt_base)]
+ self.assertEqual(expected_commands, self.executes)
+
+ @mock.patch.object(volume.utils, 'execute')
+ @mock.patch.object(volume.LOG, 'debug')
+ @mock.patch.object(volume.LOG, 'exception')
+ def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception,
+ mock_LOG_debug, mock_utils_exe):
+ export_string = '192.168.1.1:/nfs/share1'
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: device is busy.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_debug.called)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: target is busy.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_debug.called)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: Other error.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_exception.called)
+
+ def test_libvirt_nfs_driver_get_config(self):
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, self.name)
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+
+ def test_libvirt_nfs_driver_already_mounted(self):
+        # The stubbed execute lets findmnt succeed, so the driver treats
+        # the share as already mounted and only unmounts on disconnect.
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base, '--source',
+ export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_nfs_driver_with_opts(self):
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/nfs/share1'
+ options = '-o intr,nfsvers=3'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'options': options}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
+ export_string, export_mnt_base),
+ ('umount', export_mnt_base),
+ ]
+ self.assertEqual(expected_commands, self.executes)
+
+ def aoe_connection(self, shelf, lun):
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+ return {
+ 'driver_volume_type': 'aoe',
+ 'data': {
+ 'target_shelf': shelf,
+ 'target_lun': lun,
+ 'device_path': aoedevpath
+ }
+ }
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_libvirt_aoe_driver(self, exists):
+ libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
+ shelf = '100'
+ lun = '1'
+ connection_info = self.aoe_connection(shelf, lun)
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ exists.assert_called_with(aoedevpath)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertEqual(aoedevpath, connection_info['data']['device_path'])
+ expected_commands = [('aoe-revalidate', aoedev)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_libvirt_aoe_driver_get_config(self):
+ libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
+ shelf = '100'
+ lun = '1'
+ connection_info = self.aoe_connection(shelf, lun)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(aoedevpath, tree.find('./source').get('dev'))
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_glusterfs_driver(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ device_path = os.path.join(export_mnt_base,
+ connection_info['data']['name'])
+ self.assertEqual(device_path, connection_info['data']['device_path'])
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'glusterfs', export_string, export_mnt_base),
+ ('umount', export_mnt_base)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_libvirt_glusterfs_driver_get_config(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, self.name)
+
+ # Test default format - raw
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+
+ # Test specified format - qcow2
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path,
+ 'format': 'qcow2'}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('qcow2', tree.find('./driver').get('type'))
+
+ def test_libvirt_glusterfs_driver_already_mounted(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base,
+ '--source', export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_glusterfs_driver_with_opts(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ options = '-o backupvolfile-server=192.168.1.2'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'options': options}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'glusterfs',
+ '-o', 'backupvolfile-server=192.168.1.2',
+ export_string, export_mnt_base),
+ ('umount', export_mnt_base),
+ ]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_glusterfs_libgfapi(self):
+ self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
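+        # With 'gluster' in qemu_allowed_storage_drivers the volume should
+        # be exposed via libgfapi as a network disk (protocol=gluster)
+        # rather than as a file on a FUSE mount.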
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ name = 'volume-00001'
+
+ connection_info = {'data': {'export': export_string, 'name': name}}
+
+ disk_info = {
+ "dev": "vde",
+ "type": "disk",
+ "bus": "virtio",
+ }
+
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./driver').get('type'), 'raw')
+
+ source = tree.find('./source')
+ self.assertEqual(source.get('protocol'), 'gluster')
+ self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
+ self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
+ self.assertEqual(source.find('./host').get('port'), '24007')
+
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def fibrechan_connection(self, volume, location, wwn):
+ return {
+ 'driver_volume_type': 'fibrechan',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_wwn': wwn,
+ 'target_lun': 1,
+ }
+ }
+
+ def test_libvirt_fibrechan_driver(self):
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas',
+ fake_libvirt_utils.get_fc_hbas)
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
+ fake_libvirt_utils.get_fc_hbas_info)
+        # NOTE(vish): os.path.exists is stubbed out so the driver assumes
+        # the connect worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ multipath_devname = '/dev/md-1'
+ devices = {"device": multipath_devname,
+ "id": "1234567890",
+ "devices": [{'device': '/dev/sdb',
+ 'address': '1:0:0:1',
+ 'host': 1, 'channel': 0,
+ 'id': 0, 'lun': 1}]}
+ self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
+ self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
+ # Should work for string, unicode, and list
+ wwns = ['1234567890123456', unicode('1234567890123456'),
+ ['1234567890123456', '1234567890123457']]
+ for wwn in wwns:
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, wwn)
+ mount_device = "vde"
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+
+ # Test the scenario where multipath_id is returned
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ self.assertEqual(multipath_devname,
+ connection_info['data']['device_path'])
+ expected_commands = []
+ self.assertEqual(expected_commands, self.executes)
+ # Test the scenario where multipath_id is not returned
+ connection_info["data"]["devices"] = devices["devices"]
+ del connection_info["data"]["multipath_id"]
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ expected_commands = []
+ self.assertEqual(expected_commands, self.executes)
+
+ # Should not work for anything other than string, unicode, and list
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, 123)
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, self.disk_info)
+
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, self.disk_info)
+
+ def test_libvirt_fibrechan_driver_get_config(self):
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, 123)
+ connection_info['data']['device_path'] = ("/sys/devices/pci0000:00"
+ "/0000:00:03.0/0000:05:00.3/host2/fc_host/host2")
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(connection_info['data']['device_path'],
+ tree.find('./source').get('dev'))
+
+ def test_libvirt_fibrechan_getpci_num(self):
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:05:00.3", pci_num)
+
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:06:00.6", pci_num)
+
+ def test_libvirt_scality_driver(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
+ TEST_CONFIG = os.path.join(tempdir, 'fake_config')
+ TEST_VOLDIR = 'volumes'
+ TEST_VOLNAME = 'volume_name'
+ TEST_CONN_INFO = {
+ 'data': {
+ 'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
+ }
+ }
+ TEST_VOLPATH = os.path.join(TEST_MOUNT,
+ TEST_VOLDIR,
+ TEST_VOLNAME)
+ open(TEST_CONFIG, "w+").close()
+ os.makedirs(os.path.join(TEST_MOUNT, 'sys'))
+
+ def _access_wrapper(path, flags):
+ if path == '/sbin/mount.sofs':
+ return True
+ else:
+ return os.access(path, flags)
+
+ self.stubs.Set(os, 'access', _access_wrapper)
+ self.flags(scality_sofs_config=TEST_CONFIG,
+ scality_sofs_mount_point=TEST_MOUNT,
+ group='libvirt')
+ driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
+ driver.connect_volume(TEST_CONN_INFO, self.disk_info)
+
+ device_path = os.path.join(TEST_MOUNT,
+ TEST_CONN_INFO['data']['sofs_path'])
+ self.assertEqual(device_path,
+ TEST_CONN_INFO['data']['device_path'])
+
+ conf = driver.get_config(TEST_CONN_INFO, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, TEST_VOLPATH)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
new file mode 100644
index 0000000000..f71438eae2
--- /dev/null
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -0,0 +1,684 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import block_device
+from nova import context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import matchers
+from nova.virt import block_device as driver_block_device
+from nova.virt import driver
+from nova.volume import cinder
+from nova.volume import encryptors
+
+
+class TestDriverBlockDevice(test.NoDBTestCase):
+ driver_classes = {
+ 'swap': driver_block_device.DriverSwapBlockDevice,
+ 'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
+ 'volume': driver_block_device.DriverVolumeBlockDevice,
+ 'snapshot': driver_block_device.DriverSnapshotBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
+ 'blank': driver_block_device.DriverBlankBlockDevice
+ }
+
+ swap_bdm = block_device.BlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'disk_bus': 'scsi',
+ 'volume_size': 2,
+ 'boot_index': -1})
+
+ swap_driver_bdm = {
+ 'device_name': '/dev/sdb1',
+ 'swap_size': 2,
+ 'disk_bus': 'scsi'}
+
+ swap_legacy_driver_bdm = {
+ 'device_name': '/dev/sdb1',
+ 'swap_size': 2}
+
+ ephemeral_bdm = block_device.BlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'volume_size': 4,
+ 'guest_format': 'ext4',
+ 'delete_on_termination': True,
+ 'boot_index': -1})
+
+ ephemeral_driver_bdm = {
+ 'device_name': '/dev/sdc1',
+ 'size': 4,
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'scsi'}
+
+ ephemeral_legacy_driver_bdm = {
+ 'device_name': '/dev/sdc1',
+ 'size': 4,
+ 'virtual_name': 'ephemeral0',
+ 'num': 0}
+
+ volume_bdm = block_device.BlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'volume_size': 8,
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'guest_format': 'ext4',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'delete_on_termination': False,
+ 'boot_index': 0})
+
+ volume_driver_bdm = {
+ 'mount_device': '/dev/sda1',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': False,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0}
+
+ volume_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda1',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': False}
+
+ snapshot_bdm = block_device.BlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 3,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ snapshot_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ snapshot_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ image_bdm = block_device.BlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 1,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'image_id': 'fake-image-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ image_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ image_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ blank_bdm = block_device.BlockDeviceDict(
+ {'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 3,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ blank_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ blank_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ def setUp(self):
+ super(TestDriverBlockDevice, self).setUp()
+ self.volume_api = self.mox.CreateMock(cinder.API)
+ self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
+ self.context = context.RequestContext('fake_user',
+ 'fake_project')
+
+ def test_no_device_raises(self):
+ for name, cls in self.driver_classes.items():
+ self.assertRaises(driver_block_device._NotTransformable,
+ cls, {'no_device': True})
+
+ def _test_driver_device(self, name):
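+        # Generic check: the DB BDM must transform into the expected driver
+        # and legacy dicts, proxy object attributes, be rejected by every
+        # other driver class, and update the underlying object on save().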
+ db_bdm = getattr(self, "%s_bdm" % name)
+ test_bdm = self.driver_classes[name](db_bdm)
+ self.assertThat(test_bdm, matchers.DictMatches(
+ getattr(self, "%s_driver_bdm" % name)))
+
+ for k, v in db_bdm.iteritems():
+ field_val = getattr(test_bdm._bdm_obj, k)
+ if isinstance(field_val, bool):
+ v = bool(v)
+ self.assertEqual(field_val, v)
+
+ self.assertThat(test_bdm.legacy(),
+ matchers.DictMatches(
+ getattr(self, "%s_legacy_driver_bdm" % name)))
+
+ # Test passthru attributes
+ for passthru in test_bdm._proxy_as_attr:
+ self.assertEqual(getattr(test_bdm, passthru),
+ getattr(test_bdm._bdm_obj, passthru))
+
+ # Make sure that all others raise _invalidType
+ for other_name, cls in self.driver_classes.iteritems():
+ if other_name == name:
+ continue
+ self.assertRaises(driver_block_device._InvalidType,
+ cls,
+ getattr(self, '%s_bdm' % name))
+
+ # Test the save method
+ with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
+ test_bdm.save(self.context)
+ for fld, alias in test_bdm._update_on_save.iteritems():
+ self.assertEqual(test_bdm[alias or fld],
+ getattr(test_bdm._bdm_obj, fld))
+
+ save_mock.assert_called_once_with(self.context)
+
+ # Test the save method with no context passed
+ with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
+ test_bdm.save()
+ save_mock.assert_called_once_with()
+
+ def _test_driver_default_size(self, name):
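+        # A missing or None volume_size must default to a size of 0 in the
+        # resulting driver BDM.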
+ size = 'swap_size' if name == 'swap' else 'size'
+ no_size_bdm = getattr(self, "%s_bdm" % name).copy()
+ no_size_bdm['volume_size'] = None
+
+ driver_bdm = self.driver_classes[name](no_size_bdm)
+ self.assertEqual(driver_bdm[size], 0)
+
+ del no_size_bdm['volume_size']
+
+ driver_bdm = self.driver_classes[name](no_size_bdm)
+ self.assertEqual(driver_bdm[size], 0)
+
+ def test_driver_swap_block_device(self):
+ self._test_driver_device("swap")
+
+ def test_driver_swap_default_size(self):
+ self._test_driver_default_size('swap')
+
+ def test_driver_ephemeral_block_device(self):
+ self._test_driver_device("ephemeral")
+
+ def test_driver_ephemeral_default_size(self):
+ self._test_driver_default_size('ephemeral')
+
+ def test_driver_volume_block_device(self):
+ self._test_driver_device("volume")
+
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ self.assertEqual(test_bdm['connection_info'],
+ jsonutils.loads(test_bdm._bdm_obj.connection_info))
+ self.assertEqual(test_bdm._bdm_obj.id, 3)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
+ self.assertEqual(test_bdm.volume_size, 8)
+
+ def test_driver_snapshot_block_device(self):
+ self._test_driver_device("snapshot")
+
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+ self.assertEqual(test_bdm._bdm_obj.id, 4)
+ self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+ self.assertEqual(test_bdm.volume_size, 3)
+
+ def test_driver_image_block_device(self):
+ self._test_driver_device('image')
+
+ test_bdm = self.driver_classes['image'](
+ self.image_bdm)
+ self.assertEqual(test_bdm._bdm_obj.id, 5)
+ self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
+ self.assertEqual(test_bdm.volume_size, 1)
+
+ def test_driver_image_block_device_destination_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm.copy()
+ bdm['destination_type'] = 'local'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'], bdm)
+
+ def test_driver_blank_block_device(self):
+ self._test_driver_device('blank')
+
+ test_bdm = self.driver_classes['blank'](
+ self.blank_bdm)
+ self.assertEqual(6, test_bdm._bdm_obj.id)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+ self.assertEqual(3, test_bdm.volume_size)
+
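+    # _test_volume_attach records the volume_api/virt_driver calls expected
+    # from DriverVolumeBlockDevice.attach(). The check_attach, driver_attach
+    # and volume_attach flags select which expectations are recorded, while
+    # the fail_* flags make the matching call raise so the rollback and
+    # exception paths can be asserted by the callers.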
+ def _test_volume_attach(self, driver_bdm, bdm_dict,
+ fake_volume, check_attach=True,
+ fail_check_attach=False, driver_attach=False,
+ fail_driver_attach=False, volume_attach=True,
+ access_mode='rw'):
+ elevated_context = self.context.elevated()
+ self.stubs.Set(self.context, 'elevated',
+ lambda: elevated_context)
+ self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
+ self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+ connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+ connection_info = {'data': {'access_mode': access_mode}}
+ expected_conn_info = {'data': {'access_mode': access_mode},
+ 'serial': fake_volume['id']}
+ enc_data = {'fake': 'enc_data'}
+
+ self.volume_api.get(self.context,
+ fake_volume['id']).AndReturn(fake_volume)
+ if check_attach:
+ if not fail_check_attach:
+ self.volume_api.check_attach(self.context, fake_volume,
+ instance=instance).AndReturn(None)
+ else:
+ self.volume_api.check_attach(self.context, fake_volume,
+ instance=instance).AndRaise(
+ test.TestingException)
+ return instance, expected_conn_info
+
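+        # Everything below is only expected when check_attach succeeds.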
+ self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+ self.volume_api.initialize_connection(
+ elevated_context, fake_volume['id'],
+ connector).AndReturn(connection_info)
+ if driver_attach:
+ encryptors.get_encryption_metadata(
+ elevated_context, self.volume_api, fake_volume['id'],
+ connection_info).AndReturn(enc_data)
+ if not fail_driver_attach:
+ self.virt_driver.attach_volume(
+ elevated_context, expected_conn_info, instance,
+ bdm_dict['device_name'],
+ disk_bus=bdm_dict['disk_bus'],
+ device_type=bdm_dict['device_type'],
+ encryption=enc_data).AndReturn(None)
+ else:
+ self.virt_driver.attach_volume(
+ elevated_context, expected_conn_info, instance,
+ bdm_dict['device_name'],
+ disk_bus=bdm_dict['disk_bus'],
+ device_type=bdm_dict['device_type'],
+ encryption=enc_data).AndRaise(test.TestingException)
+ self.volume_api.terminate_connection(
+ elevated_context, fake_volume['id'],
+ expected_conn_info).AndReturn(None)
+ return instance, expected_conn_info
+
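+        # When the attach is expected to complete, also record the Cinder
+        # attach call and the save of the updated BDM.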
+ if volume_attach:
+ self.volume_api.attach(elevated_context, fake_volume['id'],
+ 'fake_uuid', bdm_dict['device_name'],
+ mode=access_mode).AndReturn(None)
+ driver_bdm._bdm_obj.save(self.context).AndReturn(None)
+ return instance, expected_conn_info
+
+ def test_volume_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_volume_attach_ro(self):
+ test_bdm = self.driver_classes['volume'](self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, access_mode='ro')
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+    def test_volume_attach_check_attach_fails(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1'}
+
+ instance, _ = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+ self.mox.ReplayAll()
+
+        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
+                          instance, self.volume_api, self.virt_driver)
+
+ def test_volume_no_volume_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, check_attach=False,
+ driver_attach=False)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver,
+ do_check_attach=False, do_driver_attach=False)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_volume_attach_no_check_driver_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, check_attach=False,
+ driver_attach=True)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver,
+ do_check_attach=False, do_driver_attach=True)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+    def test_volume_attach_driver_attach_fails(self):
+        test_bdm = self.driver_classes['volume'](
+            self.volume_bdm)
+        volume = {'id': 'fake-volume-id-1'}
+
+        instance, _ = self._test_volume_attach(
+            test_bdm, self.volume_bdm, volume, driver_attach=True,
+            fail_driver_attach=True)
+        self.mox.ReplayAll()
+
+        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
+                          instance, self.volume_api, self.virt_driver,
+                          do_driver_attach=True)
+
+ def test_refresh_connection(self):
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+ connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+ connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
+ expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
+ 'serial': 'fake-volume-id-2'}
+
+ self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
+
+ self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+ self.volume_api.initialize_connection(
+ self.context, test_bdm.volume_id,
+ connector).AndReturn(connection_info)
+ test_bdm._bdm_obj.save(self.context).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ test_bdm.refresh_connection_info(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_snapshot_attach_no_volume(self):
+ no_volume_snapshot = self.snapshot_bdm.copy()
+ no_volume_snapshot['volume_id'] = None
+ test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
+
+ snapshot = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
+
+ wait_func = self.mox.CreateMockAnything()
+
+ self.volume_api.get_snapshot(self.context,
+ 'fake-snapshot-id-1').AndReturn(snapshot)
+ self.volume_api.create(self.context, 3,
+ '', '', snapshot).AndReturn(volume)
+ wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, no_volume_snapshot, volume)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver, wait_func)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_snapshot_attach_volume(self):
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+ volume_class = self.driver_classes['volume']
+ self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+ self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+ self.mox.StubOutWithMock(self.volume_api, 'create')
+
+ volume_class.attach(self.context, instance, self.volume_api,
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_image_attach_no_volume(self):
+ no_volume_image = self.image_bdm.copy()
+ no_volume_image['volume_id'] = None
+ test_bdm = self.driver_classes['image'](no_volume_image)
+
+ image = {'id': 'fake-image-id-1'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
+
+ wait_func = self.mox.CreateMockAnything()
+
+ self.volume_api.create(self.context, 1,
+ '', '', image_id=image['id']).AndReturn(volume)
+ wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, no_volume_image, volume)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver, wait_func)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_image_attach_volume(self):
+ test_bdm = self.driver_classes['image'](
+ self.image_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+ volume_class = self.driver_classes['volume']
+ self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+ self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+ self.mox.StubOutWithMock(self.volume_api, 'create')
+
+ volume_class.attach(self.context, instance, self.volume_api,
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_blank_attach_volume(self):
+ no_blank_volume = self.blank_bdm.copy()
+ no_blank_volume['volume_id'] = None
+ test_bdm = self.driver_classes['blank'](no_blank_volume)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
+ **{'uuid': 'fake-uuid'})
+ volume_class = self.driver_classes['volume']
+ volume = {'id': 'fake-volume-id-2',
+ 'display_name': 'fake-uuid-blank-vol'}
+
+ with contextlib.nested(
+ mock.patch.object(self.volume_api, 'create', return_value=volume),
+ mock.patch.object(volume_class, 'attach')
+ ) as (vol_create, vol_attach):
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+
+ vol_create.assert_called_once_with(self.context,
+ test_bdm.volume_size,
+ 'fake-uuid-blank-vol',
+ '')
+ vol_attach.assert_called_once_with(self.context, instance,
+ self.volume_api,
+ self.virt_driver,
+ do_check_attach=True)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+
+ def test_convert_block_devices(self):
+ converted = driver_block_device._convert_block_devices(
+ self.driver_classes['volume'],
+ [self.volume_bdm, self.ephemeral_bdm])
+ self.assertEqual(converted, [self.volume_driver_bdm])
+
+ def test_legacy_block_devices(self):
+ test_snapshot = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ block_device_mapping = [test_snapshot, test_snapshot]
+ legacy_bdm = driver_block_device.legacy_block_devices(
+ block_device_mapping)
+ self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
+ self.snapshot_legacy_driver_bdm])
+
+ # Test that the ephemerals work as expected
+ test_ephemerals = [self.driver_classes['ephemeral'](
+ self.ephemeral_bdm) for _ in xrange(2)]
+ expected = [self.ephemeral_legacy_driver_bdm.copy()
+ for _ in xrange(2)]
+ expected[0]['virtual_name'] = 'ephemeral0'
+ expected[0]['num'] = 0
+ expected[1]['virtual_name'] = 'ephemeral1'
+ expected[1]['num'] = 1
+ legacy_ephemerals = driver_block_device.legacy_block_devices(
+ test_ephemerals)
+ self.assertEqual(expected, legacy_ephemerals)
+
+ def test_get_swap(self):
+ swap = [self.swap_driver_bdm]
+ legacy_swap = [self.swap_legacy_driver_bdm]
+ no_swap = [self.volume_driver_bdm]
+
+ self.assertEqual(swap[0], driver_block_device.get_swap(swap))
+ self.assertEqual(legacy_swap[0],
+ driver_block_device.get_swap(legacy_swap))
+ self.assertIsNone(driver_block_device.get_swap(no_swap))
+ self.assertIsNone(driver_block_device.get_swap([]))
+
+ def test_is_implemented(self):
+ for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
+ self.ephemeral_bdm, self.snapshot_bdm):
+ self.assertTrue(driver_block_device.is_implemented(bdm))
+ local_image = self.image_bdm.copy()
+ local_image['destination_type'] = 'local'
+ self.assertFalse(driver_block_device.is_implemented(local_image))
+
+ def test_is_block_device_mapping(self):
+ test_swap = self.driver_classes['swap'](self.swap_bdm)
+ test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
+ test_image = self.driver_classes['image'](self.image_bdm)
+ test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
+ test_volume = self.driver_classes['volume'](self.volume_bdm)
+ test_blank = self.driver_classes['blank'](self.blank_bdm)
+
+ for bdm in (test_image, test_snapshot, test_volume, test_blank):
+ self.assertTrue(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
+
+ for bdm in (test_swap, test_ephemeral):
+ self.assertFalse(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
diff --git a/nova/tests/unit/virt/test_configdrive.py b/nova/tests/unit/virt/test_configdrive.py
new file mode 100644
index 0000000000..b8dc717b80
--- /dev/null
+++ b/nova/tests/unit/virt/test_configdrive.py
@@ -0,0 +1,30 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import strutils
+
+from nova import test
+from nova.virt import configdrive
+
+
+class ConfigDriveTestCase(test.NoDBTestCase):
+ def test_valid_string_values(self):
+ for value in (strutils.TRUE_STRINGS + ('always',)):
+ self.flags(force_config_drive=value)
+ self.assertTrue(configdrive.required_by({}))
+
+ def test_invalid_string_values(self):
+ for value in (strutils.FALSE_STRINGS + ('foo',)):
+ self.flags(force_config_drive=value)
+ self.assertFalse(configdrive.required_by({}))
diff --git a/nova/tests/unit/virt/test_diagnostics.py b/nova/tests/unit/virt/test_diagnostics.py
new file mode 100644
index 0000000000..f3969fc09f
--- /dev/null
+++ b/nova/tests/unit/virt/test_diagnostics.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova import test
+from nova.virt import diagnostics
+
+
+class DiagnosticsTests(test.NoDBTestCase):
+
+ def test_cpu_diagnostics_default(self):
+ cpu = diagnostics.CpuDiagnostics()
+ self.assertEqual(0, cpu.time)
+
+ def test_cpu_diagnostics(self):
+ cpu = diagnostics.CpuDiagnostics(time=7)
+ self.assertEqual(7, cpu.time)
+
+ def test_nic_diagnostics_default(self):
+ nic = diagnostics.NicDiagnostics()
+ self.assertEqual('00:00:00:00:00:00', nic.mac_address)
+ self.assertEqual(0, nic.rx_octets)
+ self.assertEqual(0, nic.rx_errors)
+ self.assertEqual(0, nic.rx_drop)
+ self.assertEqual(0, nic.rx_packets)
+ self.assertEqual(0, nic.tx_octets)
+ self.assertEqual(0, nic.tx_errors)
+ self.assertEqual(0, nic.tx_drop)
+ self.assertEqual(0, nic.tx_packets)
+
+ def test_nic_diagnostics(self):
+ nic = diagnostics.NicDiagnostics(mac_address='00:00:ca:fe:00:00',
+ rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4,
+ tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8)
+ self.assertEqual('00:00:ca:fe:00:00', nic.mac_address)
+ self.assertEqual(1, nic.rx_octets)
+ self.assertEqual(2, nic.rx_errors)
+ self.assertEqual(3, nic.rx_drop)
+ self.assertEqual(4, nic.rx_packets)
+ self.assertEqual(5, nic.tx_octets)
+ self.assertEqual(6, nic.tx_errors)
+ self.assertEqual(7, nic.tx_drop)
+ self.assertEqual(8, nic.tx_packets)
+
+ def test_disk_diagnostics_default(self):
+ disk = diagnostics.DiskDiagnostics()
+ self.assertEqual('', disk.id)
+ self.assertEqual(0, disk.read_bytes)
+ self.assertEqual(0, disk.read_requests)
+ self.assertEqual(0, disk.write_bytes)
+ self.assertEqual(0, disk.write_requests)
+ self.assertEqual(0, disk.errors_count)
+
+ def test_disk_diagnostics(self):
+ disk = diagnostics.DiskDiagnostics(id='fake_disk_id',
+ read_bytes=1, read_requests=2,
+ write_bytes=3, write_requests=4,
+ errors_count=5)
+ self.assertEqual('fake_disk_id', disk.id)
+ self.assertEqual(1, disk.read_bytes)
+ self.assertEqual(2, disk.read_requests)
+ self.assertEqual(3, disk.write_bytes)
+ self.assertEqual(4, disk.write_requests)
+ self.assertEqual(5, disk.errors_count)
+
+ def test_memory_diagnostics_default(self):
+ memory = diagnostics.MemoryDiagnostics()
+ self.assertEqual(0, memory.maximum)
+ self.assertEqual(0, memory.used)
+
+ def test_memory_diagnostics(self):
+ memory = diagnostics.MemoryDiagnostics(maximum=1, used=2)
+ self.assertEqual(1, memory.maximum)
+ self.assertEqual(2, memory.used)
+
+ def test_diagnostics_default(self):
+ diags = diagnostics.Diagnostics()
+ self.assertIsNone(diags.state)
+ self.assertIsNone(diags.driver)
+ self.assertIsNone(diags.hypervisor_os)
+ self.assertEqual(0, diags.uptime)
+ self.assertFalse(diags.config_drive)
+ self.assertEqual([], diags.cpu_details)
+ self.assertEqual([], diags.nic_details)
+ self.assertEqual([], diags.disk_details)
+ self.assertEqual(0, diags.memory_details.maximum)
+ self.assertEqual(0, diags.memory_details.used)
+ self.assertEqual('1.0', diags.version)
+
+ def test_diagnostics(self):
+ cpu_details = [diagnostics.CpuDiagnostics()]
+ nic_details = [diagnostics.NicDiagnostics()]
+ disk_details = [diagnostics.DiskDiagnostics()]
+ diags = diagnostics.Diagnostics(
+ state='fake-state', driver='fake-driver',
+ hypervisor_os='fake-os',
+ uptime=1, cpu_details=cpu_details,
+ nic_details=nic_details, disk_details=disk_details,
+ config_drive=True)
+ self.assertEqual('fake-state', diags.state)
+ self.assertEqual('fake-driver', diags.driver)
+ self.assertEqual('fake-os', diags.hypervisor_os)
+ self.assertEqual(1, diags.uptime)
+ self.assertTrue(diags.config_drive)
+ self.assertEqual(1, len(diags.cpu_details))
+ self.assertEqual(1, len(diags.nic_details))
+ self.assertEqual(1, len(diags.disk_details))
+ self.assertEqual(0, diags.memory_details.maximum)
+ self.assertEqual(0, diags.memory_details.used)
+ self.assertEqual('1.0', diags.version)
+
+ def test_add_cpu(self):
+ diags = diagnostics.Diagnostics()
+ self.assertEqual([], diags.cpu_details)
+ diags.add_cpu(time=7)
+ self.assertEqual(1, len(diags.cpu_details))
+ self.assertEqual(7, diags.cpu_details[0].time)
+
+ def test_add_nic(self):
+ diags = diagnostics.Diagnostics()
+ self.assertEqual([], diags.nic_details)
+ diags.add_nic(mac_address='00:00:ca:fe:00:00',
+ rx_octets=1, rx_errors=2, rx_drop=3, rx_packets=4,
+ tx_octets=5, tx_errors=6, tx_drop=7, tx_packets=8)
+ self.assertEqual(1, len(diags.nic_details))
+ self.assertEqual('00:00:ca:fe:00:00', diags.nic_details[0].mac_address)
+ self.assertEqual(1, diags.nic_details[0].rx_octets)
+ self.assertEqual(2, diags.nic_details[0].rx_errors)
+ self.assertEqual(3, diags.nic_details[0].rx_drop)
+ self.assertEqual(4, diags.nic_details[0].rx_packets)
+ self.assertEqual(5, diags.nic_details[0].tx_octets)
+ self.assertEqual(6, diags.nic_details[0].tx_errors)
+ self.assertEqual(7, diags.nic_details[0].tx_drop)
+ self.assertEqual(8, diags.nic_details[0].tx_packets)
+
+ def test_add_disk(self):
+ diags = diagnostics.Diagnostics()
+ self.assertEqual([], diags.disk_details)
+ diags.add_disk(id='fake_disk_id',
+ read_bytes=1, read_requests=2,
+ write_bytes=3, write_requests=4,
+ errors_count=5)
+ self.assertEqual(1, len(diags.disk_details))
+ self.assertEqual('fake_disk_id', diags.disk_details[0].id)
+ self.assertEqual(1, diags.disk_details[0].read_bytes)
+ self.assertEqual(2, diags.disk_details[0].read_requests)
+ self.assertEqual(3, diags.disk_details[0].write_bytes)
+ self.assertEqual(4, diags.disk_details[0].write_requests)
+ self.assertEqual(5, diags.disk_details[0].errors_count)
+
+ def test_diagnostics_serialize_default(self):
+ diags = diagnostics.Diagnostics()
+ expected = {'config_drive': False,
+ 'cpu_details': [],
+ 'disk_details': [],
+ 'driver': None,
+ 'hypervisor_os': None,
+ 'memory_details': {'maximum': 0, 'used': 0},
+ 'nic_details': [],
+ 'state': None,
+ 'uptime': 0,
+ 'version': '1.0'}
+ result = diags.serialize()
+ self.assertEqual(expected, result)
+
+ def test_diagnostics_serialize(self):
+ cpu_details = [diagnostics.CpuDiagnostics()]
+ nic_details = [diagnostics.NicDiagnostics()]
+ disk_details = [diagnostics.DiskDiagnostics()]
+ diags = diagnostics.Diagnostics(
+ state='fake-state', driver='fake-driver',
+ hypervisor_os='fake-os',
+ uptime=1, cpu_details=cpu_details,
+ nic_details=nic_details, disk_details=disk_details,
+ config_drive=True)
+ expected = {'config_drive': True,
+ 'cpu_details': [{'time': 0}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 0,
+ 'read_requests': 0,
+ 'write_bytes': 0,
+ 'write_requests': 0}],
+ 'driver': 'fake-driver',
+ 'hypervisor_os': 'fake-os',
+ 'memory_details': {'maximum': 0, 'used': 0},
+ 'nic_details': [{'mac_address': '00:00:00:00:00:00',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 0,
+ 'rx_packets': 0,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 0,
+ 'tx_packets': 0}],
+ 'state': 'fake-state',
+ 'uptime': 1,
+ 'version': '1.0'}
+ result = diags.serialize()
+ self.assertEqual(expected, result)
+
+ def test_diagnostics_invalid_input(self):
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ cpu_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ cpu_details=['invalid entry'])
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ nic_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ nic_details=['invalid entry'])
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ disk_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ disk_details=['invalid entry'])
diff --git a/nova/tests/unit/virt/test_driver.py b/nova/tests/unit/virt/test_driver.py
new file mode 100644
index 0000000000..572afdedec
--- /dev/null
+++ b/nova/tests/unit/virt/test_driver.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Citrix Systems, Inc.
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt import driver
+
+
+class FakeDriver(object):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class FakeDriver2(FakeDriver):
+ pass
+
+
+class ToDriverRegistryTestCase(test.NoDBTestCase):
+
+ def assertDriverInstance(self, inst, class_, *args, **kwargs):
+ self.assertEqual(class_, inst.__class__)
+ self.assertEqual(args, inst.args)
+ self.assertEqual(kwargs, inst.kwargs)
+
+ def test_driver_dict_from_config(self):
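+        # driver_dict_from_config parses 'key=<importable class>' strings
+        # and instantiates each class with the given args and kwargs.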
+ drvs = driver.driver_dict_from_config(
+ [
+ 'key1=nova.tests.unit.virt.test_driver.FakeDriver',
+ 'key2=nova.tests.unit.virt.test_driver.FakeDriver2',
+ ], 'arg1', 'arg2', param1='value1', param2='value2'
+ )
+
+ self.assertEqual(
+ sorted(['key1', 'key2']),
+ sorted(drvs.keys())
+ )
+
+ self.assertDriverInstance(
+ drvs['key1'],
+ FakeDriver, 'arg1', 'arg2', param1='value1',
+ param2='value2')
+
+ self.assertDriverInstance(
+ drvs['key2'],
+ FakeDriver2, 'arg1', 'arg2', param1='value1',
+ param2='value2')
diff --git a/nova/tests/unit/virt/test_events.py b/nova/tests/unit/virt/test_events.py
new file mode 100644
index 0000000000..792a8d0453
--- /dev/null
+++ b/nova/tests/unit/virt/test_events.py
@@ -0,0 +1,36 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from nova import test
+from nova.virt import event
+
+
+class TestEvents(test.NoDBTestCase):
+
+ def test_event_repr(self):
+ t = time.time()
+ uuid = '1234'
+ lifecycle = event.EVENT_LIFECYCLE_RESUMED
+
+ e = event.Event(t)
+ self.assertEqual(str(e), "<Event: %s>" % t)
+
+ e = event.InstanceEvent(uuid, timestamp=t)
+ self.assertEqual(str(e), "<InstanceEvent: %s, %s>" % (t, uuid))
+
+ e = event.LifecycleEvent(uuid, lifecycle, timestamp=t)
+ self.assertEqual(str(e), "<LifecycleEvent: %s, %s => Resumed>" %
+ (t, uuid))
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
new file mode 100644
index 0000000000..d0781a6ca7
--- /dev/null
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -0,0 +1,1439 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+import six
+
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base as base_obj
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt import hardware as hw
+
+
+class FakeFlavor(dict):
+ def __init__(self, vcpus, memory, extra_specs):
+ self['vcpus'] = vcpus
+ self['memory_mb'] = memory
+ self['extra_specs'] = extra_specs
+
+
+class FakeFlavorObject(object):
+ def __init__(self, vcpus, memory, extra_specs):
+ self.vcpus = vcpus
+ self.memory_mb = memory
+ self.extra_specs = extra_specs
+
+ def __getitem__(self, item):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ raise KeyError(item)
+
+ def get(self, item, default=None):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+
+class CpuSetTestCase(test.NoDBTestCase):
+ def test_get_vcpu_pin_set(self):
+ self.flags(vcpu_pin_set="1-3,5,^2")
+ cpuset_ids = hw.get_vcpu_pin_set()
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ def test_parse_cpu_spec_none_returns_none(self):
+ self.flags(vcpu_pin_set=None)
+ cpuset_ids = hw.get_vcpu_pin_set()
+ self.assertIsNone(cpuset_ids)
+
+ def test_parse_cpu_spec_valid_syntax_works(self):
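+        # Accepted grammar: comma separated CPU ids, 'a-b' ranges and '^n'
+        # exclusions; surrounding whitespace is ignored.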
+ cpuset_ids = hw.parse_cpu_spec("1")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1,2")
+ self.assertEqual(set([1, 2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,")
+ self.assertEqual(set([1, 2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-1")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3")
+ self.assertEqual(set([1, 2, 3]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1,^2")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-2, ^1")
+ self.assertEqual(set([2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-3,5,^2")
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5")
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1")
+ self.assertEqual(set([]), cpuset_ids)
+
+ def test_parse_cpu_spec_invalid_syntax_raises(self):
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ " -1-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3-,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^2^")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^2-")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "--13,^^5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "a-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-a,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,b,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^c")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "3 - 1, 5 , ^ 2 ")
+
+ def test_format_cpu_spec(self):
+ cpus = set([])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("", spec)
+
+ cpus = []
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("", spec)
+
+ cpus = set([1, 3])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1,3", spec)
+
+ cpus = [1, 3]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1,3", spec)
+
+ cpus = set([1, 2, 4, 6])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1-2,4,6", spec)
+
+ cpus = [1, 2, 4, 6]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1-2,4,6", spec)
+
+ cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+ cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+ cpus = set([1, 2, 4, 6])
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("1,2,4,6", spec)
+
+ cpus = [1, 2, 4, 6]
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("1,2,4,6", spec)
+
+ cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+ cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+
+class VCPUTopologyTest(test.NoDBTestCase):
+
+ def test_validate_config(self):
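+        # Each "expect" tuple is (preferred sockets, cores, threads,
+        # maximum sockets, cores, threads); 65536 is the default unlimited
+        # maximum and -1 means no preference was set.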
+ testdata = [
+ { # Flavor sets preferred topology only
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": (
+ 8, 2, 1, 65536, 65536, 65536
+ )
+ },
+ { # Image topology overrides flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_max_threads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": (
+ 4, 2, 2, 65536, 65536, 2,
+ )
+ },
+ { # Partial image topology overrides flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "2",
+ }
+ },
+ "expect": (
+ 2, -1, -1, 65536, 65536, 65536,
+ )
+ },
+ { # Restrict use of threads
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_threads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_threads": "1",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 65536, 1,
+ )
+ },
+ { # Force use of at least two sockets
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": (
+ -1, -1, -1, 65536, 8, 1
+ )
+ },
+ { # Image limits reduce flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "4",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 4, 1
+ )
+ },
+ { # Image limits kill flavor preferred
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "2",
+ "hw:cpu_cores": "8",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "4",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 4, 65536
+ )
+ },
+ { # Image limits cannot exceed flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "16",
+ }
+ },
+ "expect": exception.ImageVCPULimitsRangeExceeded,
+ },
+ { # Image preferred cannot exceed flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_cores": "16",
+ }
+ },
+ "expect": exception.ImageVCPUTopologyRangeExceeded,
+ },
+ ]
+
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == tuple:
+ (preferred,
+ maximum) = hw.VirtCPUTopology.get_topology_constraints(
+ topo_test["flavor"],
+ topo_test["image"])
+
+ self.assertEqual(topo_test["expect"][0], preferred.sockets)
+ self.assertEqual(topo_test["expect"][1], preferred.cores)
+ self.assertEqual(topo_test["expect"][2], preferred.threads)
+ self.assertEqual(topo_test["expect"][3], maximum.sockets)
+ self.assertEqual(topo_test["expect"][4], maximum.cores)
+ self.assertEqual(topo_test["expect"][5], maximum.threads)
+ else:
+ self.assertRaises(topo_test["expect"],
+ hw.VirtCPUTopology.get_topology_constraints,
+ topo_test["flavor"],
+ topo_test["image"])
+
+ def test_possible_configs(self):
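+        # get_possible_topologies should yield every (sockets, cores,
+        # threads) factorisation of the vcpu count that fits within the
+        # given maximums.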
+ testdata = [
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ [4, 1, 2],
+ [2, 2, 2],
+ [1, 4, 2],
+ ]
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1024,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ [4, 1, 2],
+ [2, 2, 2],
+ [1, 4, 2],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 1, 2],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 7,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [7, 1, 1],
+ [1, 7, 1],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 2,
+ "maxcores": 1,
+ "maxthreads": 1,
+ "expect": exception.ImageVCPULimitsRangeImpossible,
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 2,
+ "maxcores": 1,
+ "maxthreads": 4,
+ "expect": exception.ImageVCPULimitsRangeImpossible,
+ },
+ ]
+
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == list:
+ actual = []
+ for topology in hw.VirtCPUTopology.get_possible_topologies(
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"]):
+ actual.append([topology.sockets,
+ topology.cores,
+ topology.threads])
+
+ self.assertEqual(topo_test["expect"], actual)
+ else:
+ self.assertRaises(topo_test["expect"],
+ hw.VirtCPUTopology.get_possible_topologies,
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
+
+ def test_sorting_configs(self):
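+        # Candidate topologies are sorted best-first against the preferred
+        # (sockets, cores, threads) values; the inline scores record the
+        # ranking each candidate receives.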
+ testdata = [
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "sockets": 4,
+ "cores": 2,
+ "threads": 1,
+ "expect": [
+ [4, 2, 1], # score = 2
+ [8, 1, 1], # score = 1
+ [2, 4, 1], # score = 1
+ [1, 8, 1], # score = 1
+ [4, 1, 2], # score = 1
+ [2, 2, 2], # score = 1
+ [1, 4, 2], # score = 1
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1024,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": 4,
+ "threads": -1,
+ "expect": [
+ [2, 4, 1], # score = 1
+ [1, 4, 2], # score = 1
+ [8, 1, 1], # score = 0
+ [4, 2, 1], # score = 0
+ [1, 8, 1], # score = 0
+ [4, 1, 2], # score = 0
+ [2, 2, 2], # score = 0
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [4, 1, 2], # score = 1
+ [8, 1, 1], # score = 0
+ ]
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [8, 1, 1], # score = 0
+ ]
+ },
+ ]
+
+ for topo_test in testdata:
+ actual = []
+ possible = hw.VirtCPUTopology.get_possible_topologies(
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
+
+ tops = hw.VirtCPUTopology.sort_possible_topologies(
+ possible,
+ hw.VirtCPUTopology(topo_test["sockets"],
+ topo_test["cores"],
+ topo_test["threads"]))
+ for topology in tops:
+ actual.append([topology.sockets,
+ topology.cores,
+ topology.threads])
+
+ self.assertEqual(topo_test["expect"], actual)
+
+ def test_best_config(self):
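+        # get_desirable_configs returns candidates best-first; only the top
+        # choice is asserted here.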
+ testdata = [
+ { # Flavor sets preferred topology only
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [4, 2, 2],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": False,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Partial image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "2"
+ }
+ },
+ "expect": [2, 8, 1],
+ },
+ { # Restrict use of threads
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Force use of at least two sockets
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Image limits reduce flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_sockets": "8",
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_sockets": 4,
+ }
+ },
+ "expect": [4, 4, 1]
+ },
+ { # Image limits kill flavor preferred
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "2",
+ "hw:cpu_cores": "8",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": 4,
+ }
+ },
+ "expect": [16, 1, 1]
+ },
+ ]
+
+ for topo_test in testdata:
+ topology = hw.VirtCPUTopology.get_desirable_configs(
+ topo_test["flavor"],
+ topo_test["image"],
+ topo_test["allow_threads"])[0]
+
+ self.assertEqual(topo_test["expect"][0], topology.sockets)
+ self.assertEqual(topo_test["expect"][1], topology.cores)
+ self.assertEqual(topo_test["expect"][2], topology.threads)
+
+
+class NUMATopologyTest(test.NoDBTestCase):
+
+ def test_topology_constraints(self):
+ testdata = [
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ }),
+ "image": {
+ },
+ "expect": None,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 5, 6, 7]), 1024),
+ ]),
+ },
+ {
+                # vcpus is not a multiple of nodes (8 vCPUs cannot be
+                # split evenly across 3 nodes), so omitting an explicit
+                # cpu/mem mapping is an error
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyAsymmetric,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "4,6",
+ "hw:numa_mem.1": "512",
+ "hw:numa_cpus.2": "5,7",
+ "hw:numa_mem.2": "512",
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([5, 7]), 512),
+ ]),
+ },
+ {
+ # Request a CPU that is out of range
+ # wrt vCPU count
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 1,
+ "hw:numa_cpus.0": "0-16",
+ "hw:numa_mem.0": "2048",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUOutOfRange,
+ },
+ {
+ # Request the same CPU in two nodes
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-7",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "0-7",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUDuplicates,
+ },
+ {
+ # Request with some CPUs not assigned
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-2",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "3-4",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUsUnassigned,
+ },
+ {
+ # Request too little memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "512",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "512",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request too much memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request missing mem.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Request missing cpu.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Image attempts to override flavor
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ }),
+ "image": {
+ "hw_numa_nodes": 4,
+ },
+ "expect": exception.ImageNUMATopologyForbidden,
+ },
+ ]
+
+ for testitem in testdata:
+ if testitem["expect"] is None:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertIsNone(topology)
+            elif isinstance(testitem["expect"], type):
+ self.assertRaises(testitem["expect"],
+ hw.VirtNUMAInstanceTopology.get_constraints,
+ testitem["flavor"],
+ testitem["image"])
+ else:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertEqual(len(testitem["expect"].cells),
+ len(topology.cells))
+ for i in range(len(topology.cells)):
+ self.assertEqual(testitem["expect"].cells[i].cpuset,
+ topology.cells[i].cpuset)
+ self.assertEqual(testitem["expect"].cells[i].memory,
+ topology.cells[i].memory)
+
+    def test_can_fit_instances(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512)
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4, 6]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([7, 8]), 256),
+ ])
+
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, []))
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance2]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1, instance2]))
+
+ def test_host_usage_contiguous(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 3)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_host_usage_sparse(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(6, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(5, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].id,
+ hostusage.cells[0].id)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].id,
+ hostusage.cells[1].id)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 256)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+    def test_host_usage_cumulative_with_free(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(
+ 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 512),
+ hw.VirtNUMATopologyCellInstance(1, set([3]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([4]), 256)])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1])
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 1024)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 768)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+ # Test freeing of resources
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hostusage, [instance1], free=True)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[1].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_topo_usage_none(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ None, [instance1])
+ self.assertIsNone(hostusage)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [])
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, None)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ def _test_to_dict(self, cell_or_topo, expected):
+ got = cell_or_topo._to_dict()
+ self.assertThat(expected, matchers.DictMatches(got))
+
+ def assertNUMACellMatches(self, expected_cell, got_cell):
+ attrs = ('cpuset', 'memory', 'id')
+        if isinstance(expected_cell, hw.VirtNUMATopologyCellUsage):
+ attrs += ('cpu_usage', 'memory_usage')
+
+ for attr in attrs:
+ self.assertEqual(getattr(expected_cell, attr),
+ getattr(got_cell, attr))
+
+ def _test_cell_from_dict(self, data_dict, expected_cell):
+ cell_class = expected_cell.__class__
+ got_cell = cell_class._from_dict(data_dict)
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def _test_topo_from_dict(self, data_dict, expected_topo):
+ got_topo = expected_topo.__class__._from_dict(
+ data_dict)
+ for got_cell, expected_cell in zip(
+ got_topo.cells, expected_topo.cells):
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def test_numa_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': None}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_pagesize_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2]), 512, hw.VirtPageSize(2048))
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': 2048}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_limit_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellLimit(1, set([1, 2]), 512, 4, 2048)
+ cell_dict = {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 512, 'limit': 2048},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_usage_dict(self):
+ cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 512, 'used': 0},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_instance_topo_dict(self):
+ topo = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellInstance(2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2',
+ 'mem': {'total': 1024},
+ 'id': 1,
+ 'pagesize': None},
+ {'cpus': '3,4',
+ 'mem': {'total': 1024},
+ 'id': 2,
+ 'pagesize': None}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_limits_topo_dict(self):
+ topo = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2]), 1024, 4, 2048),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([3, 4]), 1024, 4, 2048)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_topo_dict_with_usage(self):
+ topo = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_json(self):
+ expected = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ got = hw.VirtNUMAHostTopology.from_json(expected.to_json())
+
+ for exp_cell, got_cell in zip(expected.cells, got.cells):
+ self.assertNUMACellMatches(exp_cell, got_cell)
+
+
+class NumberOfSerialPortsTest(test.NoDBTestCase):
+ def test_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ num_ports = hw.get_number_of_serial_ports(flavor, None)
+ self.assertEqual(3, num_ports)
+
+ def test_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 'foo'})
+ image_meta = {"properties": {}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 'bar'}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_smaller_than_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_smaller_than_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 4}}
+ self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
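+
+
+# Illustrative summary of the precedence the tests above exercise (a sketch
+# only, not the actual nova.virt.hardware implementation): the image value
+# wins as long as it does not exceed the flavor value, and exceeding the
+# flavor value is an error rather than a silent clamp.
+def _serial_port_count_sketch(flavor_count, image_count):
+    if image_count is None:
+        return flavor_count
+    if flavor_count is not None and image_count > flavor_count:
+        raise ValueError("image serial port count exceeds flavor value")
+    return image_count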
+
+
+class NUMATopologyClaimsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(NUMATopologyClaimsTest, self).setUp()
+
+ self.host = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([5, 6]), 1024)])
+
+ self.limits = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_limit=8, memory_limit=4096),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([5, 6]), 1024,
+ cpu_limit=4, memory_limit=2048)])
+
+ self.large_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5, 6]), 8192),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 4096)])
+ self.medium_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 2048)])
+ self.small_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([5]), 1024)])
+ self.no_fit_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ hw.VirtNUMATopologyCellInstance(3, set([3]), 256)])
+
+ def test_claim_not_enough_info(self):
+
+ # No limits supplied
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance]))
+ # Empty topology
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ hw.VirtNUMAHostTopology(), [self.large_instance],
+ limits=self.limits))
+ # No instances to claim
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(self.host, [], self.limits))
+
+ def test_claim_succeeds(self):
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.small_instance], self.limits))
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance], self.limits))
+
+ def test_claim_fails(self):
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance], self.limits),
+ six.text_type)
+
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance, self.small_instance],
+ self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance], self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology even with no limits
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance]), six.text_type)
+
+
+class HelperMethodsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(HelperMethodsTestCase, self).setUp()
+ self.hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ self.instancetopo = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([2]), 256),
+ ])
+ self.context = context.RequestContext('fake-user',
+ 'fake-project')
+
+ def _check_usage(self, host_usage):
+ self.assertEqual(2, host_usage.cells[0].cpu_usage)
+ self.assertEqual(256, host_usage.cells[0].memory_usage)
+ self.assertEqual(1, host_usage.cells[1].cpu_usage)
+ self.assertEqual(256, host_usage.cells[1].memory_usage)
+
+ def test_dicts_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_dicts_instance_json(self):
+ host = {'numa_topology': self.hosttopo}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+ def test_dicts_host_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance_json(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_instance_with_fetch(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = {'uuid': fake_uuid}
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_object_instance_with_load(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, uuid=fake_uuid)
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_instance_serialized_by_build_request_spec(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
+ numa_topology=objects.InstanceNUMATopology.obj_from_topology(
+ self.instancetopo))
+ # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
+ # We can remove this test once we no longer use that method.
+ instance_raw = jsonutils.to_primitive(
+ base_obj.obj_to_primitive(instance))
+ res = hw.get_host_numa_usage_from_instance(host, instance_raw)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_attr_host(self):
+ class Host(object):
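+            # 'obj' is used instead of 'self' so that the enclosing test
+            # method's 'self' (and self.hosttopo) stays reachable via the
+            # closure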
+ def __init__(obj):
+ obj.numa_topology = self.hosttopo.to_json()
+
+ host = Host()
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_never_serialize_result(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance,
+ never_serialize_result=True)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+
+class VirtMemoryPagesTestCase(test.NoDBTestCase):
+ def test_virt_pages_topology(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_virt_pages_topology_to_dict(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512}, pages.to_dict())
+
+ def test_virt_pages_topology_from_dict(self):
+ pages = hw.VirtPagesTopology.from_dict({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512})
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_cell_instance_pagesize(self):
+ pagesize = hw.VirtPageSize(2048)
+ cell = hw.VirtNUMATopologyCellInstance(
+ 0, set([0]), 1024, pagesize)
+
+ self.assertEqual(0, cell.id)
+ self.assertEqual(set([0]), cell.cpuset)
+ self.assertEqual(1024, cell.memory)
+ self.assertEqual(2048, cell.pagesize.size_kb)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
new file mode 100644
index 0000000000..dc587fb4bc
--- /dev/null
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -0,0 +1,122 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.compute import vm_states
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt import imagecache
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTests(test.NoDBTestCase):
+
+    def test_configuration_defaults(self):
+ self.assertEqual(2400, CONF.image_cache_manager_interval)
+ self.assertEqual('_base', CONF.image_cache_subdirectory_name)
+ self.assertTrue(CONF.remove_unused_base_images)
+ self.assertEqual(24 * 3600,
+ CONF.remove_unused_original_minimum_age_seconds)
+
+ def test_cache_manager(self):
+ cache_manager = imagecache.ImageCacheManager()
+ self.assertTrue(cache_manager.remove_unused_base_images)
+ self.assertRaises(NotImplementedError,
+ cache_manager.update, None, [])
+ self.assertRaises(NotImplementedError,
+ cache_manager._get_base)
+ base_images = cache_manager._list_base_images(None)
+ self.assertEqual([], base_images['unexplained_images'])
+ self.assertEqual([], base_images['originals'])
+ self.assertRaises(NotImplementedError,
+ cache_manager._age_and_verify_cached_images,
+ None, [], None)
+
+ def test_list_running_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'id': '2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': 'remotehost',
+ 'id': '3',
+ 'uuid': '789',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+
+ # The argument here should be a context, but it's mocked out
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(4, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual((1, 1, ['instance-00000002',
+ 'instance-00000003']),
+ running['used_images']['2'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['21'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['22'])
+
+ self.assertIn('instance-00000001', running['instance_names'])
+ self.assertIn('123', running['instance_names'])
+
+ self.assertEqual(4, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
+ self.assertEqual(2, running['image_popularity']['2'])
+ self.assertEqual(1, running['image_popularity']['21'])
+ self.assertEqual(1, running['image_popularity']['22'])
+
+ def test_list_resizing_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(1, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual(set(['instance-00000001', '123',
+ 'instance-00000001_resize', '123_resize']),
+ running['instance_names'])
+
+ self.assertEqual(1, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
new file mode 100644
index 0000000000..be5ea73ef1
--- /dev/null
+++ b/nova/tests/unit/virt/test_images.py
@@ -0,0 +1,45 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+from oslo.concurrency import processutils
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt import images
+
+
+class QemuTestCase(test.NoDBTestCase):
+ def test_qemu_info_with_bad_path(self):
+ self.assertRaises(exception.InvalidDiskInfo,
+ images.qemu_img_info,
+ '/path/that/does/not/exist')
+
+ @mock.patch.object(os.path, 'exists', return_value=True)
+ def test_qemu_info_with_errors(self, path_exists):
+ self.assertRaises(processutils.ProcessExecutionError,
+ images.qemu_img_info,
+ '/fake/path')
+
+ @mock.patch.object(os.path, 'exists', return_value=True)
+ @mock.patch.object(utils, 'execute',
+ return_value=('stdout', None))
+    def test_qemu_info_with_no_errors(self, utils_execute,
+                                      path_exists):
+ image_info = images.qemu_img_info('/fake/path')
+ self.assertTrue(image_info)
+        self.assertTrue(str(image_info))
\ No newline at end of file
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
new file mode 100644
index 0000000000..67b0ac503a
--- /dev/null
+++ b/nova/tests/unit/virt/test_virt.py
@@ -0,0 +1,287 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import os
+
+import mock
+
+from nova import test
+from nova import utils
+from nova.virt.disk import api as disk_api
+from nova.virt.disk.mount import api as mount
+from nova.virt import driver
+
+PROC_MOUNTS_CONTENTS = """rootfs / rootfs rw 0 0
+sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+udev /dev devtmpfs rw,relatime,size=1013160k,nr_inodes=253290,mode=755 0 0
+devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620 0 0
+tmpfs /run tmpfs rw,nosuid,relatime,size=408904k,mode=755 0 0"""
+
+
+class TestVirtDriver(test.NoDBTestCase):
+ def test_block_device(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ empty_block_device_info = {}
+
+ self.assertEqual(
+ driver.block_device_info_get_root(block_device_info), '/dev/sda')
+ self.assertIsNone(
+ driver.block_device_info_get_root(empty_block_device_info))
+ self.assertIsNone(driver.block_device_info_get_root(None))
+
+ self.assertEqual(
+ driver.block_device_info_get_swap(block_device_info), swap)
+ self.assertIsNone(driver.block_device_info_get_swap(
+ empty_block_device_info)['device_name'])
+ self.assertEqual(driver.block_device_info_get_swap(
+ empty_block_device_info)['swap_size'], 0)
+ self.assertIsNone(
+ driver.block_device_info_get_swap({'swap': None})['device_name'])
+ self.assertEqual(
+ driver.block_device_info_get_swap({'swap': None})['swap_size'],
+ 0)
+ self.assertIsNone(
+ driver.block_device_info_get_swap(None)['device_name'])
+ self.assertEqual(
+ driver.block_device_info_get_swap(None)['swap_size'], 0)
+
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(block_device_info),
+ ephemerals)
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(empty_block_device_info),
+ [])
+ self.assertEqual(
+ driver.block_device_info_get_ephemerals(None),
+ [])
+
+ def test_swap_is_usable(self):
+ self.assertFalse(driver.swap_is_usable(None))
+ self.assertFalse(driver.swap_is_usable({'device_name': None}))
+ self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
+ 'swap_size': 0}))
+ self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
+ 'swap_size': 1}))
+
+
+class FakeMount(object):
+ def __init__(self, image, mount_dir, partition=None, device=None):
+ self.image = image
+ self.partition = partition
+ self.mount_dir = mount_dir
+
+ self.linked = self.mapped = self.mounted = False
+ self.device = device
+
+ def do_mount(self):
+ self.linked = True
+ self.mapped = True
+ self.mounted = True
+ self.device = '/dev/fake'
+ return True
+
+ def do_umount(self):
+ self.linked = True
+ self.mounted = False
+
+ def do_teardown(self):
+ self.linked = False
+ self.mapped = False
+ self.mounted = False
+ self.device = None
+
+
+class TestDiskImage(test.NoDBTestCase):
+ def mock_proc_mounts(self, mock_open):
+ response = io.StringIO(unicode(PROC_MOUNTS_CONTENTS))
+ mock_open.return_value = response
+
+ @mock.patch('__builtin__.open')
+ def test_mount(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+
+ @mock.patch('__builtin__.open')
+ def test_umount(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+ diskimage.umount()
+ self.assertIsNone(diskimage._mounter)
+
+ @mock.patch('__builtin__.open')
+ def test_teardown(self, mock_open):
+ self.mock_proc_mounts(mock_open)
+
+ image = '/tmp/fake-image'
+ mountdir = '/mnt/fake_rootfs'
+ fakemount = FakeMount(image, mountdir, None)
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return fakemount
+
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+ diskimage = disk_api._DiskImage(image=image, mount_dir=mountdir)
+ dev = diskimage.mount()
+ self.assertEqual(diskimage._mounter, fakemount)
+ self.assertEqual(dev, '/dev/fake')
+ diskimage.teardown()
+ self.assertIsNone(diskimage._mounter)
+
+
+class TestVirtDisk(test.NoDBTestCase):
+ def setUp(self):
+ super(TestVirtDisk, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_lxc_setup_container(self):
+ image = '/tmp/fake-image'
+ container_dir = '/mnt/fake_rootfs/'
+
+ def proc_mounts(self, mount_point):
+ return None
+
+ def fake_instance_for_format(imgfile, mountdir, partition, imgfmt):
+ return FakeMount(imgfile, mountdir, partition)
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ self.stubs.Set(mount.Mount, 'instance_for_format',
+ staticmethod(fake_instance_for_format))
+
+ self.assertEqual(disk_api.setup_container(image, container_dir),
+ '/dev/fake')
+
+ def test_lxc_teardown_container(self):
+
+ def proc_mounts(self, mount_point):
+ mount_points = {
+ '/mnt/loop/nopart': '/dev/loop0',
+ '/mnt/loop/part': '/dev/mapper/loop0p1',
+ '/mnt/nbd/nopart': '/dev/nbd15',
+ '/mnt/nbd/part': '/dev/mapper/nbd15p1',
+ }
+ return mount_points[mount_point]
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ expected_commands = []
+
+ disk_api.teardown_container('/mnt/loop/nopart')
+ expected_commands += [
+ ('umount', '/dev/loop0'),
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/loop/part')
+ expected_commands += [
+ ('umount', '/dev/mapper/loop0p1'),
+ ('kpartx', '-d', '/dev/loop0'),
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/nopart')
+ expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
+ ('umount', '/dev/nbd15'),
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/part')
+ expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
+ ('umount', '/dev/mapper/nbd15p1'),
+ ('kpartx', '-d', '/dev/nbd15'),
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_lxc_teardown_container_with_namespace_cleaned(self):
+
+ def proc_mounts(self, mount_point):
+ return None
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
+ expected_commands = []
+
+ disk_api.teardown_container('/mnt/loop/nopart', '/dev/loop0')
+ expected_commands += [
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/loop/part', '/dev/loop0')
+ expected_commands += [
+ ('losetup', '--detach', '/dev/loop0'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/nopart', '/dev/nbd15')
+ expected_commands += [
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ disk_api.teardown_container('/mnt/nbd/part', '/dev/nbd15')
+ expected_commands += [
+ ('qemu-nbd', '-d', '/dev/nbd15'),
+ ]
+
+ self.assertEqual(self.executes, expected_commands)
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
new file mode 100644
index 0000000000..48c009fd42
--- /dev/null
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -0,0 +1,881 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import sys
+import traceback
+
+import fixtures
+import mock
+import netaddr
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import manager
+from nova.console import type as ctype
+from nova import exception
+from nova import objects
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import utils as test_utils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import block_device as driver_block_device
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import libvirt
+from nova.virt.libvirt import imagebackend
+
+LOG = logging.getLogger(__name__)
+
+
+def catch_notimplementederror(f):
+ """Decorator to simplify catching drivers raising NotImplementedError
+
+ If a particular call makes a driver raise NotImplementedError, we
+ log it so that we can extract this information afterwards as needed.
+ """
+ def wrapped_func(self, *args, **kwargs):
+ try:
+ return f(self, *args, **kwargs)
+ except NotImplementedError:
+ frame = traceback.extract_tb(sys.exc_info()[2])[-1]
+ LOG.error("%(driver)s does not implement %(method)s "
+ "required for test %(test)s" %
+ {'driver': type(self.connection),
+ 'method': frame[2], 'test': f.__name__})
+
+ wrapped_func.__name__ = f.__name__
+ wrapped_func.__doc__ = f.__doc__
+ return wrapped_func
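+
+
+# Illustrative alternative (not part of the original change): functools.wraps
+# would preserve the wrapped function's metadata instead of copying __name__
+# and __doc__ by hand. Standard library only; shown purely as a design note.
+def catch_notimplementederror_wraps_sketch(f):
+    import functools
+
+    @functools.wraps(f)
+    def wrapped_func(self, *args, **kwargs):
+        try:
+            return f(self, *args, **kwargs)
+        except NotImplementedError:
+            LOG.error("%(driver)s does not implement %(method)s",
+                      {'driver': type(self.connection),
+                       'method': f.__name__})
+
+    return wrapped_func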
+
+
+class _FakeDriverBackendTestCase(object):
+ def _setup_fakelibvirt(self):
+        # Point instances_path at an existing temporary directory so that
+        # _supports_direct_io runs its check somewhere real, instead of
+        # against the default instances_path, which doesn't exist
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+
+ # Put fakelibvirt in place
+ if 'libvirt' in sys.modules:
+ self.saved_libvirt = sys.modules['libvirt']
+ else:
+ self.saved_libvirt = None
+
+ import nova.tests.unit.virt.libvirt.fake_imagebackend as \
+ fake_imagebackend
+ import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
+ fake_libvirt_utils
+ import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt
+
+ sys.modules['libvirt'] = fakelibvirt
+ import nova.virt.libvirt.driver
+ import nova.virt.libvirt.firewall
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.imagebackend',
+ fake_imagebackend))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt',
+ fakelibvirt))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.firewall.libvirt',
+ fakelibvirt))
+
+ self.flags(rescue_image_id="2",
+ rescue_kernel_id="3",
+ rescue_ramdisk_id=None,
+ snapshots_directory='./',
+ group='libvirt')
+
+ def fake_extend(image, size):
+ pass
+
+ def fake_migrateToURI(*a):
+ pass
+
+ def fake_make_drive(_self, _path):
+ pass
+
+ def fake_get_instance_disk_info(_self, instance, xml=None,
+ block_device_info=None):
+ return '[]'
+
+ def fake_delete_instance_files(_self, _instance):
+ pass
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_get_instance_disk_info',
+ fake_get_instance_disk_info)
+
+ self.stubs.Set(nova.virt.libvirt.driver.disk,
+ 'extend', fake_extend)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_delete_instance_files',
+ fake_delete_instance_files)
+
+ # Like the existing fakelibvirt.migrateToURI, do nothing,
+ # but don't fail for these tests.
+ self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
+ 'migrateToURI', fake_migrateToURI)
+
+ # We can't actually make a config drive v2 because ensure_tree has
+ # been faked out
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
+ 'make_drive', fake_make_drive)
+
+ def _teardown_fakelibvirt(self):
+ # Restore libvirt
+ if self.saved_libvirt:
+ sys.modules['libvirt'] = self.saved_libvirt
+
+ def setUp(self):
+ super(_FakeDriverBackendTestCase, self).setUp()
+ # TODO(sdague): it would be nice to do this in a way that only
+        # the relevant backends were replaced for tests, though this
+ # should not harm anything by doing it for all backends
+ fake_image.stub_out_image_service(self.stubs)
+ self._setup_fakelibvirt()
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ self._teardown_fakelibvirt()
+ super(_FakeDriverBackendTestCase, self).tearDown()
+
+
+class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
+ """Test that ComputeManager can successfully load both
+ old style and new style drivers and end up with the correct
+ final class.
+ """
+
+ # if your driver supports being tested in a fake way, it can go here
+ #
+ # both long form and short form drivers are supported
+ new_drivers = {
+ 'nova.virt.fake.FakeDriver': 'FakeDriver',
+ 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
+ 'fake.FakeDriver': 'FakeDriver',
+ 'libvirt.LibvirtDriver': 'LibvirtDriver'
+ }
+
+ def test_load_new_drivers(self):
+ for cls, driver in self.new_drivers.iteritems():
+ self.flags(compute_driver=cls)
+ # NOTE(sdague) the try block is to make it easier to debug a
+ # failure by knowing which driver broke
+ try:
+ cm = manager.ComputeManager()
+ except Exception as e:
+ self.fail("Couldn't load driver %s - %s" % (cls, e))
+
+ self.assertEqual(cm.driver.__class__.__name__, driver,
+ "Could't load driver %s" % cls)
+
+ def test_fail_to_load_new_drivers(self):
+ self.flags(compute_driver='nova.virt.amiga')
+
+ def _fake_exit(error):
+ raise test.TestingException()
+
+ self.stubs.Set(sys, 'exit', _fake_exit)
+ self.assertRaises(test.TestingException, manager.ComputeManager)
+
+
+class _VirtDriverTestCase(_FakeDriverBackendTestCase):
+ def setUp(self):
+ super(_VirtDriverTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+ self.connection = importutils.import_object(self.driver_module,
+ fake.FakeVirtAPI())
+ self.ctxt = test_utils.get_test_admin_context()
+ self.image_service = fake_image.FakeImageService()
+ # NOTE(dripton): resolve_driver_format does some file reading and
+ # writing and chowning that complicate testing too much by requiring
+ # using real directories with proper permissions. Just stub it out
+ # here; we test it in test_imagebackend.py
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
+ def _get_running_instance(self, obj=True):
+ instance_ref = test_utils.get_test_instance(obj=obj)
+ network_info = test_utils.get_test_network_info()
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ image_info = test_utils.get_test_image_info(None, instance_ref)
+ self.connection.spawn(self.ctxt, instance_ref, image_info,
+ [], 'herp', network_info=network_info)
+ return instance_ref, network_info
+
+ @catch_notimplementederror
+ def test_init_host(self):
+ self.connection.init_host('myhostname')
+
+ @catch_notimplementederror
+ def test_list_instances(self):
+ self.connection.list_instances()
+
+ @catch_notimplementederror
+ def test_list_instance_uuids(self):
+ self.connection.list_instance_uuids()
+
+ @catch_notimplementederror
+ def test_spawn(self):
+ instance_ref, network_info = self._get_running_instance()
+ domains = self.connection.list_instances()
+ self.assertIn(instance_ref['name'], domains)
+
+ num_instances = self.connection.get_num_instances()
+ self.assertEqual(1, num_instances)
+
+ @catch_notimplementederror
+ def test_snapshot_not_running(self):
+ instance_ref = test_utils.get_test_instance()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.assertRaises(exception.InstanceNotRunning,
+ self.connection.snapshot,
+ self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_snapshot_running(self):
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_post_interrupted_snapshot_cleanup(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
+ instance_ref)
+
+ @catch_notimplementederror
+ def test_reboot(self):
+ reboot_type = "SOFT"
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.reboot(self.ctxt, instance_ref, network_info,
+ reboot_type)
+
+ @catch_notimplementederror
+ def test_get_host_ip_addr(self):
+ host_ip = self.connection.get_host_ip_addr()
+
+ # Will raise an exception if it's not a valid IP at all
+ ip = netaddr.IPAddress(host_ip)
+
+ # For now, assume IPv4.
+ self.assertEqual(ip.version, 4)
+
+ @catch_notimplementederror
+ def test_set_admin_password(self):
+ instance, network_info = self._get_running_instance(obj=True)
+ self.connection.set_admin_password(instance, 'p4ssw0rd')
+
+ @catch_notimplementederror
+ def test_inject_file(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.inject_file(instance_ref,
+ base64.b64encode('/testfile'),
+ base64.b64encode('testcontents'))
+
+ @catch_notimplementederror
+ def test_resume_state_on_host_boot(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_rescue(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+
+ @catch_notimplementederror
+ def test_unrescue_unrescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_rescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_poll_rebooting_instances(self):
+ instances = [self._get_running_instance()]
+ self.connection.poll_rebooting_instances(10, instances)
+
+ @catch_notimplementederror
+ def test_migrate_disk_and_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ flavor_ref = test_utils.get_test_flavor()
+ self.connection.migrate_disk_and_power_off(
+ self.ctxt, instance_ref, 'dest_host', flavor_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+
+ @catch_notimplementederror
+ def test_power_on_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_on(self.ctxt, instance_ref,
+ network_info, None)
+
+ @catch_notimplementederror
+ def test_power_on_powered_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+ self.connection.power_on(self.ctxt, instance_ref, network_info, None)
+
+ @catch_notimplementederror
+ def test_soft_delete(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ self.connection.soft_delete(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_soft_deleted(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_pause(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_unpaused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_paused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_suspend(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+
+ @catch_notimplementederror
+ def test_resume_unsuspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_resume_suspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance_nonexistent(self):
+ fake_instance = {'id': 42, 'name': 'I just made this up!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
+ network_info = test_utils.get_test_network_info()
+ self.connection.destroy(self.ctxt, fake_instance, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIn(instance_ref['name'],
+ self.connection.list_instances())
+ self.connection.destroy(self.ctxt, instance_ref, network_info)
+ self.assertNotIn(instance_ref['name'],
+ self.connection.list_instances())
+
+ @catch_notimplementederror
+ def test_get_volume_connector(self):
+ result = self.connection.get_volume_connector({'id': 'fake'})
+ self.assertIn('ip', result)
+ self.assertIn('initiator', result)
+ self.assertIn('host', result)
+
+ @catch_notimplementederror
+ def test_attach_detach_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.assertIsNone(
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.detach_volume(connection_info, instance_ref,
+ '/dev/sda'))
+
+ @catch_notimplementederror
+ def test_swap_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIsNone(
+ self.connection.attach_volume(None, {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.swap_volume({'driver_volume_type': 'fake',
+ 'data': {}},
+ {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda', 2))
+
+ @catch_notimplementederror
+ def test_attach_detach_different_power_states(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.connection.power_off(instance_ref)
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda')
+
+ bdm = {
+ 'root_device_name': None,
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': instance_ref['uuid'],
+ 'device_name': '/dev/sda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'snapshot_id': None,
+ 'volume_id': 'abcdedf',
+ 'volume_size': None,
+ 'no_device': None
+ }),
+ ])
+ }
+ bdm['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'fake', 'data': {}})
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ self.connection.power_on(
+ self.ctxt, instance_ref, network_info, bdm)
+ self.connection.detach_volume(connection_info,
+ instance_ref,
+ '/dev/sda')
+
+ @catch_notimplementederror
+ def test_get_info(self):
+ instance_ref, network_info = self._get_running_instance()
+ info = self.connection.get_info(instance_ref)
+ self.assertIn('state', info)
+ self.assertIn('max_mem', info)
+ self.assertIn('mem', info)
+ self.assertIn('num_cpu', info)
+ self.assertIn('cpu_time', info)
+
+ @catch_notimplementederror
+ def test_get_info_for_unknown_instance(self):
+ self.assertRaises(exception.NotFound,
+ self.connection.get_info,
+ {'name': 'I just made this name up'})
+
+ @catch_notimplementederror
+ def test_get_diagnostics(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ self.connection.get_diagnostics(instance_ref)
+
+ @catch_notimplementederror
+ def test_get_instance_diagnostics(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ instance_ref['launched_at'] = timeutils.utcnow()
+ self.connection.get_instance_diagnostics(instance_ref)
+
+ @catch_notimplementederror
+ def test_block_stats(self):
+ instance_ref, network_info = self._get_running_instance()
+ stats = self.connection.block_stats(instance_ref['name'], 'someid')
+ self.assertEqual(len(stats), 5)
+
+ @catch_notimplementederror
+ def test_interface_stats(self):
+ instance_ref, network_info = self._get_running_instance()
+ stats = self.connection.interface_stats(instance_ref['name'], 'someid')
+ self.assertEqual(len(stats), 8)
+
+ @catch_notimplementederror
+ def test_get_console_output(self):
+ fake_libvirt_utils.files['dummy.log'] = ''
+ instance_ref, network_info = self._get_running_instance()
+ console_output = self.connection.get_console_output(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(console_output, six.string_types)
+
+ @catch_notimplementederror
+ def test_get_vnc_console(self):
+ instance, network_info = self._get_running_instance(obj=True)
+ vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
+ self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
+
+ @catch_notimplementederror
+ def test_get_spice_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ spice_console = self.connection.get_spice_console(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(spice_console, ctype.ConsoleSpice)
+
+ @catch_notimplementederror
+ def test_get_rdp_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
+ self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
+
+ @catch_notimplementederror
+ def test_get_serial_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ serial_console = self.connection.get_serial_console(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(serial_console, ctype.ConsoleSerial)
+
+ @catch_notimplementederror
+ def test_get_console_pool_info(self):
+ instance_ref, network_info = self._get_running_instance()
+ console_pool = self.connection.get_console_pool_info(instance_ref)
+ self.assertIn('address', console_pool)
+ self.assertIn('username', console_pool)
+ self.assertIn('password', console_pool)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_rules(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_security_group_rules(1)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_members(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_security_group_members(1)
+
+ @catch_notimplementederror
+ def test_refresh_instance_security_rules(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_instance_security_rules(instance_ref)
+
+ @catch_notimplementederror
+ def test_refresh_provider_fw_rules(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_provider_fw_rules()
+
+ @catch_notimplementederror
+ def test_ensure_filtering_for_instance(self):
+ instance = test_utils.get_test_instance(obj=True)
+ network_info = test_utils.get_test_network_info()
+ self.connection.ensure_filtering_rules_for_instance(instance,
+ network_info)
+
+ @catch_notimplementederror
+ def test_unfilter_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.unfilter_instance(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_live_migration(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
+ lambda *a: None, lambda *a: None)
+
+ @catch_notimplementederror
+ def _check_available_resource_fields(self, host_status):
+ keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
+ 'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
+ 'hypervisor_hostname', 'cpu_info', 'disk_available_least',
+ 'supported_instances']
+ for key in keys:
+ self.assertIn(key, host_status)
+ self.assertIsInstance(host_status['hypervisor_version'], int)
+
+ @catch_notimplementederror
+ def test_get_available_resource(self):
+ available_resource = self.connection.get_available_resource(
+ 'myhostname')
+ self._check_available_resource_fields(available_resource)
+
+ @catch_notimplementederror
+ def test_get_available_nodes(self):
+ self.connection.get_available_nodes(False)
+
+ @catch_notimplementederror
+ def _check_host_cpu_status_fields(self, host_cpu_status):
+ self.assertIn('kernel', host_cpu_status)
+ self.assertIn('idle', host_cpu_status)
+ self.assertIn('user', host_cpu_status)
+ self.assertIn('iowait', host_cpu_status)
+ self.assertIn('frequency', host_cpu_status)
+
+ @catch_notimplementederror
+ def test_get_host_cpu_stats(self):
+ host_cpu_status = self.connection.get_host_cpu_stats()
+ self._check_host_cpu_status_fields(host_cpu_status)
+
+ @catch_notimplementederror
+ def test_set_host_enabled(self):
+ self.connection.set_host_enabled('a useless argument?', True)
+
+ @catch_notimplementederror
+ def test_get_host_uptime(self):
+ self.connection.get_host_uptime('a useless argument?')
+
+ @catch_notimplementederror
+ def test_host_power_action_reboot(self):
+ self.connection.host_power_action('a useless argument?', 'reboot')
+
+ @catch_notimplementederror
+ def test_host_power_action_shutdown(self):
+ self.connection.host_power_action('a useless argument?', 'shutdown')
+
+ @catch_notimplementederror
+ def test_host_power_action_startup(self):
+ self.connection.host_power_action('a useless argument?', 'startup')
+
+ @catch_notimplementederror
+ def test_add_to_aggregate(self):
+ self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
+
+ @catch_notimplementederror
+ def test_remove_from_aggregate(self):
+ self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
+
+ def test_events(self):
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+
+ self.connection.emit_event(event1)
+ self.connection.emit_event(event2)
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ self.connection.emit_event(event3)
+ self.connection.emit_event(event4)
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_bad_object(self):
+ # Passing in something which does not inherit
+ # from virtevent.Event
+
+ def handler(event):
+ pass
+
+ self.connection.register_event_listener(handler)
+
+ badevent = {
+ "foo": "bar"
+ }
+
+ self.assertRaises(ValueError,
+ self.connection.emit_event,
+ badevent)
+
+ def test_event_bad_callback(self):
+ # Check that if a callback raises an exception,
+ # it does not propagate back out of the
+ # 'emit_event' call
+
+ def handler(event):
+ raise Exception("Hit Me!")
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+
+ self.connection.emit_event(event1)
+
+ def test_set_bootable(self):
+ self.assertRaises(NotImplementedError, self.connection.set_bootable,
+ 'instance', True)
+
+ @catch_notimplementederror
+ def test_get_instance_disk_info(self):
+ # This should be implemented by any driver that supports live migrate.
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.get_instance_disk_info(instance_ref['name'],
+ block_device_info={})
+
+
+class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
+ def setUp(self):
+ self.driver_module = "nova.virt.driver.ComputeDriver"
+ super(AbstractDriverTestCase, self).setUp()
+
+
+class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
+ def setUp(self):
+ self.driver_module = 'nova.virt.fake.FakeDriver'
+ fake.set_nodes(['myhostname'])
+ super(FakeConnectionTestCase, self).setUp()
+
+ def _check_available_resource_fields(self, host_status):
+ super(FakeConnectionTestCase, self)._check_available_resource_fields(
+ host_status)
+
+ hypervisor_type = host_status['hypervisor_type']
+ supported_instances = host_status['supported_instances']
+ try:
+ # supported_instances could be JSON wrapped
+ supported_instances = jsonutils.loads(supported_instances)
+ except TypeError:
+ pass
+ self.assertTrue(any(hypervisor_type in x for x in supported_instances))
+
+
+class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ # Point _VirtDriverTestCase at the right module
+ self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
+ super(LibvirtConnTestCase, self).setUp()
+ self.stubs.Set(self.connection,
+ '_set_host_enabled', mock.MagicMock())
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.context.get_admin_context',
+ self._fake_admin_context))
+
+ def _fake_admin_context(self, *args, **kwargs):
+ return self.ctxt
+
+ def test_force_hard_reboot(self):
+ self.flags(wait_soft_reboot_seconds=0, group='libvirt')
+ self.test_reboot()
+
+ def test_migrate_disk_and_power_off(self):
+        # There is not enough fake infrastructure to execute this method,
+        # so skip it.
+        self.skipTest("Tests nothing, but this method is needed to "
+                      "override the superclass.")
+
+ def test_internal_set_host_enabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: False
+ service_mock.configure_mock(disabled_reason='None',
+ disabled=False)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(False, 'ERROR!')
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
+
+ def test_set_host_enabled_when_auto_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'AUTO: ERROR'
+ service_mock.configure_mock(disabled_reason='AUTO: ERROR',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(True)
+ self.assertFalse(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'None')
+
+ def test_set_host_enabled_when_manually_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'Manually disabled'
+ service_mock.configure_mock(disabled_reason='Manually disabled',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(True)
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+ def test_set_host_enabled_dont_override_manually_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'Manually disabled'
+ service_mock.configure_mock(disabled_reason='Manually disabled',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(False, 'ERROR!')
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=False)
+ self.assertEqual(unplug_vifs_mock.call_count, 0)
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=True)
+ self.assertEqual(unplug_vifs_mock.call_count, 1)
+ unplug_vifs_mock.assert_called_once_with(instance_ref,
+ network_info, True)
diff --git a/nova/tests/unit/virt/test_volumeutils.py b/nova/tests/unit/virt/test_volumeutils.py
new file mode 100644
index 0000000000..8ba7e50399
--- /dev/null
+++ b/nova/tests/unit/virt/test_volumeutils.py
@@ -0,0 +1,47 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2012 University Of Minho
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for virt volumeutils.
+"""
+
+from nova import exception
+from nova import test
+from nova import utils
+from nova.virt import volumeutils
+
+
+class VolumeUtilsTestCase(test.TestCase):
+ def test_get_iscsi_initiator(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ initiator = 'fake.initiator.iqn'
+ rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
+ utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
+ run_as_root=True).AndReturn(rval)
+ # Start test
+ self.mox.ReplayAll()
+ result = volumeutils.get_iscsi_initiator()
+ self.assertEqual(initiator, result)
+
+ def test_get_missing_iscsi_initiator(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ file_path = '/etc/iscsi/initiatorname.iscsi'
+ utils.execute('cat', file_path, run_as_root=True).AndRaise(
+ exception.FileNotFound(file_path=file_path)
+ )
+ # Start test
+ self.mox.ReplayAll()
+ result = volumeutils.get_iscsi_initiator()
+ self.assertIsNone(result)
diff --git a/nova/tests/unit/virt/vmwareapi/__init__.py b/nova/tests/unit/virt/vmwareapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/__init__.py
diff --git a/nova/tests/unit/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
new file mode 100644
index 0000000000..5bd2b7fb4f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
@@ -0,0 +1,1606 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake VMware VI API implementation.
+"""
+
+import collections
+import pprint
+
+from oslo.serialization import jsonutils
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import ds_util
+
+_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
+ 'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
+ 'files', 'ClusterComputeResource', 'HostStorageSystem']
+
+_FAKE_FILE_SIZE = 1024
+
+_db_content = {}
+_array_types = {}
+_vim_map = {}
+
+LOG = logging.getLogger(__name__)
+
+
+def log_db_contents(msg=None):
+ """Log DB Contents."""
+ LOG.debug("%(text)s: _db_content => %(content)s",
+ {'text': msg or "", 'content': pprint.pformat(_db_content)})
+
+
+def reset():
+ """Resets the db contents."""
+ cleanup()
+ create_network()
+ create_host_network_system()
+ create_host_storage_system()
+ ds_ref1 = create_datastore('ds1', 1024, 500)
+ create_host(ds_ref=ds_ref1)
+ ds_ref2 = create_datastore('ds2', 1024, 500)
+ create_host(ds_ref=ds_ref2)
+ create_datacenter('dc1', ds_ref1)
+ create_datacenter('dc2', ds_ref2)
+ create_res_pool()
+ create_cluster('test_cluster', ds_ref1)
+ create_cluster('test_cluster2', ds_ref2)
+
+
+def cleanup():
+ """Clear the db contents."""
+ for c in _CLASSES:
+ # We fake the datastore by keeping the file references as a list of
+ # names in the db
+ if c == 'files':
+ _db_content[c] = []
+ else:
+ _db_content[c] = {}
+
+
+def _create_object(table, table_obj):
+ """Create an object in the db."""
+ _db_content[table][table_obj.obj] = table_obj
+
+
+def _get_object(obj_ref):
+ """Get object for the give reference."""
+ return _db_content[obj_ref.type][obj_ref]
+
+
+def _get_objects(obj_type):
+ """Get objects of the type."""
+ lst_objs = FakeRetrieveResult()
+ for key in _db_content[obj_type]:
+ lst_objs.add_object(_db_content[obj_type][key])
+ return lst_objs
+
+
+def _convert_to_array_of_mor(mors):
+ """Wraps the given array into a DataObject."""
+ array_of_mors = DataObject()
+ array_of_mors.ManagedObjectReference = mors
+ return array_of_mors
+
+
+def _convert_to_array_of_opt_val(optvals):
+ """Wraps the given array into a DataObject."""
+ array_of_optv = DataObject()
+ array_of_optv.OptionValue = optvals
+ return array_of_optv
+
+
+def _create_array_of_type(t):
+ """Returns an array to contain objects of type t."""
+ if t in _array_types:
+ return _array_types[t]()
+
+ array_type_name = 'ArrayOf%s' % t
+ array_type = type(array_type_name, (DataObject,), {})
+
+ def __init__(self):
+ super(array_type, self).__init__(array_type_name)
+ setattr(self, t, [])
+
+ setattr(array_type, '__init__', __init__)
+
+ _array_types[t] = array_type
+ return array_type()
+
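+# Illustrative usage (a sketch, not part of the original module): the helper
+# above builds and caches an "ArrayOf<t>" DataObject subclass whose attribute
+# named after 't' holds the wrapped list, mirroring the SOAP array wrappers:
+#
+#     devices = _create_array_of_type('VirtualDevice')
+#     devices.VirtualDevice.append(fake_device)  # fake_device is hypothetical
+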
+
+class FakeRetrieveResult(object):
+ """Object to retrieve a ObjectContent list."""
+
+ def __init__(self, token=None):
+ self.objects = []
+ if token is not None:
+ self.token = token
+
+ def add_object(self, object):
+ self.objects.append(object)
+
+
+class MissingProperty(object):
+ """Missing object in ObjectContent's missing set."""
+ def __init__(self, path='fake-path', message='fake_message',
+ method_fault=None):
+ self.path = path
+ self.fault = DataObject()
+ self.fault.localizedMessage = message
+ self.fault.fault = method_fault
+
+
+def _get_object_refs(obj_type):
+ """Get object References of the type."""
+ lst_objs = []
+ for key in _db_content[obj_type]:
+ lst_objs.append(key)
+ return lst_objs
+
+
+def _update_object(table, table_obj):
+ """Update objects of the type."""
+ _db_content[table][table_obj.obj] = table_obj
+
+
+class Prop(object):
+ """Property Object base class."""
+
+ def __init__(self, name=None, val=None):
+ self.name = name
+ self.val = val
+
+
+class ManagedObjectReference(object):
+ """A managed object reference is a remote identifier."""
+
+ def __init__(self, name="ManagedObject", value=None):
+        super(ManagedObjectReference, self).__init__()
+ # Managed Object Reference value attributes
+ # typically have values like vm-123 or
+ # host-232 and not UUID.
+ self.value = value
+ # Managed Object Reference type
+ # attributes hold the name of the type
+ # of the vCenter object the value
+ # attribute is the identifier for
+ self.type = name
+ self._type = name
+
+
+class ObjectContent(object):
+ """ObjectContent array holds dynamic properties."""
+
+    # This class is a *fake* of a class sent back to us by SOAP. Its
+    # attribute names are not ours to choose; they are dictated by the
+    # API we are *faking* here.
+ def __init__(self, obj_ref, prop_list=None, missing_list=None):
+ self.obj = obj_ref
+
+ if not isinstance(prop_list, collections.Iterable):
+ prop_list = []
+
+ if not isinstance(missing_list, collections.Iterable):
+ missing_list = []
+
+ # propSet is the name your Python code will need to
+ # use since this is the name that the API will use
+ if prop_list:
+ self.propSet = prop_list
+
+ # missingSet is the name your python code will
+ # need to use since this is the name that the
+ # API we are talking to will use.
+ if missing_list:
+ self.missingSet = missing_list
+
+
+class ManagedObject(object):
+ """Managed Object base class."""
+ _counter = 0
+
+ def __init__(self, mo_id_prefix="obj"):
+ """Sets the obj property which acts as a reference to the object."""
+ object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
+ object.__setattr__(self, 'propSet', [])
+ object.__setattr__(self, 'obj',
+ ManagedObjectReference(self.__class__.__name__,
+ self.mo_id))
+
+ def set(self, attr, val):
+ """Sets an attribute value. Not using the __setattr__ directly for we
+ want to set attributes of the type 'a.b.c' and using this function
+ class we set the same.
+ """
+ self.__setattr__(attr, val)
+
+ def get(self, attr):
+ """Gets an attribute. Used as an intermediary to get nested
+ property like 'a.b.c' value.
+ """
+ return self.__getattr__(attr)
+
+ def delete(self, attr):
+ """Deletes an attribute."""
+ self.propSet = filter(lambda elem: elem.name != attr, self.propSet)
+
+ def __setattr__(self, attr, val):
+        # TODO(hartsocks): this adds unnecessary complexity to the class
+ for prop in self.propSet:
+ if prop.name == attr:
+ prop.val = val
+ return
+ elem = Prop()
+ elem.name = attr
+ elem.val = val
+ self.propSet.append(elem)
+
+ def __getattr__(self, attr):
+ # TODO(hartsocks): remove this
+ # in a real ManagedObject you have to iterate the propSet
+ # in a real ManagedObject, the propSet is a *set* not a list
+ for elem in self.propSet:
+ if elem.name == attr:
+ return elem.val
+ msg = _("Property %(attr)s not set for the managed object %(name)s")
+ raise exception.NovaException(msg % {'attr': attr,
+ 'name': self.__class__.__name__})
+
+ def _generate_moid(self, prefix):
+ """Generates a new Managed Object ID."""
+ self.__class__._counter += 1
+ return prefix + "-" + str(self.__class__._counter)
+
+ def __repr__(self):
+ return jsonutils.dumps(dict([(elem.name, elem.val)
+ for elem in self.propSet]))
+
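+# Illustrative behaviour (a sketch, not part of the original module): the
+# ManagedObject above stores every attribute as a Prop in its propSet list,
+# so dotted names round-trip through set() and get():
+#
+#     mo = ManagedObject('vm')
+#     mo.set('summary.config.numCpu', 2)
+#     mo.get('summary.config.numCpu')  # -> 2
+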
+
+class DataObject(object):
+ """Data object base class."""
+
+ def __init__(self, obj_name=None):
+ self.obj_name = obj_name
+
+ def __repr__(self):
+ return str(self.__dict__)
+
+
+class HostInternetScsiHba(DataObject):
+ """iSCSI Host Bus Adapter."""
+
+ def __init__(self):
+ super(HostInternetScsiHba, self).__init__()
+ self.device = 'vmhba33'
+ self.key = 'key-vmhba33'
+
+
+class FileAlreadyExists(DataObject):
+ """File already exists class."""
+
+ def __init__(self):
+ super(FileAlreadyExists, self).__init__()
+ self.__name__ = vexc.FILE_ALREADY_EXISTS
+
+
+class FileNotFound(DataObject):
+ """File not found class."""
+
+ def __init__(self):
+ super(FileNotFound, self).__init__()
+ self.__name__ = vexc.FILE_NOT_FOUND
+
+
+class FileFault(DataObject):
+ """File fault."""
+
+ def __init__(self):
+ super(FileFault, self).__init__()
+ self.__name__ = vexc.FILE_FAULT
+
+
+class CannotDeleteFile(DataObject):
+ """Cannot delete file."""
+
+ def __init__(self):
+ super(CannotDeleteFile, self).__init__()
+ self.__name__ = vexc.CANNOT_DELETE_FILE
+
+
+class FileLocked(DataObject):
+ """File locked."""
+
+ def __init__(self):
+ super(FileLocked, self).__init__()
+ self.__name__ = vexc.FILE_LOCKED
+
+
+class VirtualDisk(DataObject):
+ """Virtual Disk class."""
+
+ def __init__(self, controllerKey=0, unitNumber=0):
+ super(VirtualDisk, self).__init__()
+ self.key = 0
+ self.controllerKey = controllerKey
+ self.unitNumber = unitNumber
+
+
+class VirtualDiskFlatVer2BackingInfo(DataObject):
+ """VirtualDiskFlatVer2BackingInfo class."""
+
+ def __init__(self):
+ super(VirtualDiskFlatVer2BackingInfo, self).__init__()
+ self.thinProvisioned = False
+ self.eagerlyScrub = False
+
+
+class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
+ """VirtualDiskRawDiskMappingVer1BackingInfo class."""
+
+ def __init__(self):
+ super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
+ self.lunUuid = ""
+
+
+class VirtualIDEController(DataObject):
+
+ def __init__(self, key=0):
+ self.key = key
+
+
+class VirtualLsiLogicController(DataObject):
+ """VirtualLsiLogicController class."""
+ def __init__(self, key=0, scsiCtlrUnitNumber=0):
+ self.key = key
+ self.scsiCtlrUnitNumber = scsiCtlrUnitNumber
+
+
+class VirtualLsiLogicSASController(DataObject):
+ """VirtualLsiLogicSASController class."""
+ pass
+
+
+class VirtualPCNet32(DataObject):
+ """VirtualPCNet32 class."""
+
+ def __init__(self):
+ super(VirtualPCNet32, self).__init__()
+ self.key = 4000
+
+
+class OptionValue(DataObject):
+ """OptionValue class."""
+
+ def __init__(self, key=None, value=None):
+ super(OptionValue, self).__init__()
+ self.key = key
+ self.value = value
+
+
+class VirtualMachine(ManagedObject):
+ """Virtual Machine class."""
+
+ def __init__(self, **kwargs):
+ super(VirtualMachine, self).__init__("vm")
+ self.set("name", kwargs.get("name", 'test-vm'))
+ self.set("runtime.connectionState",
+ kwargs.get("conn_state", "connected"))
+ self.set("summary.config.guestId",
+ kwargs.get("guest", constants.DEFAULT_OS_TYPE))
+ ds_do = kwargs.get("ds", None)
+ self.set("datastore", _convert_to_array_of_mor(ds_do))
+ self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
+ "toolsOk"))
+ self.set("summary.guest.toolsRunningStatus", kwargs.get(
+ "toolsrunningstate", "guestToolsRunning"))
+ self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
+ self.set("config.files.vmPathName", kwargs.get("vmPathName"))
+ self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
+ self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
+ self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
+
+ devices = _create_array_of_type('VirtualDevice')
+ devices.VirtualDevice = kwargs.get("virtual_device", [])
+ self.set("config.hardware.device", devices)
+
+ exconfig_do = kwargs.get("extra_config", None)
+ self.set("config.extraConfig",
+ _convert_to_array_of_opt_val(exconfig_do))
+ if exconfig_do:
+ for optval in exconfig_do:
+ self.set('config.extraConfig["%s"]' % optval.key, optval)
+ self.set('runtime.host', kwargs.get("runtime_host", None))
+ self.device = kwargs.get("virtual_device", [])
+ # Sample of diagnostics data is below.
+ config = [
+ ('template', False),
+ ('vmPathName', 'fake_path'),
+ ('memorySizeMB', 512),
+ ('cpuReservation', 0),
+ ('memoryReservation', 0),
+ ('numCpu', 1),
+ ('numEthernetCards', 1),
+ ('numVirtualDisks', 1)]
+ self.set("summary.config", config)
+
+ quickStats = [
+ ('overallCpuUsage', 0),
+ ('overallCpuDemand', 0),
+ ('guestMemoryUsage', 0),
+ ('hostMemoryUsage', 141),
+ ('balloonedMemory', 0),
+ ('consumedOverheadMemory', 20)]
+ self.set("summary.quickStats", quickStats)
+
+ key1 = {'key': 'cpuid.AES'}
+ key2 = {'key': 'cpuid.AVX'}
+ runtime = [
+ ('connectionState', 'connected'),
+ ('powerState', 'poweredOn'),
+ ('toolsInstallerMounted', False),
+ ('suspendInterval', 0),
+ ('memoryOverhead', 21417984),
+ ('maxCpuUsage', 2000),
+ ('featureRequirement', [key1, key2])]
+ self.set("summary.runtime", runtime)
+
+ def _update_extra_config(self, extra):
+ extra_config = self.get("config.extraConfig")
+ values = extra_config.OptionValue
+ for value in values:
+ if value.key == extra.key:
+ value.value = extra.value
+ return
+ kv = DataObject()
+ kv.key = extra.key
+ kv.value = extra.value
+ extra_config.OptionValue.append(kv)
+ self.set("config.extraConfig", extra_config)
+ extra_config = self.get("config.extraConfig")
+
+ def reconfig(self, factory, val):
+ """Called to reconfigure the VM. Actually customizes the property
+ setting of the Virtual Machine object.
+ """
+
+ if hasattr(val, 'name') and val.name:
+ self.set("name", val.name)
+
+ if hasattr(val, 'extraConfig'):
+ extraConfigs = _merge_extraconfig(
+ self.get("config.extraConfig").OptionValue,
+ val.extraConfig)
+ self.get("config.extraConfig").OptionValue = extraConfigs
+
+ if hasattr(val, 'instanceUuid') and val.instanceUuid is not None:
+ if val.instanceUuid == "":
+ val.instanceUuid = uuidutils.generate_uuid()
+ self.set("summary.config.instanceUuid", val.instanceUuid)
+
+ try:
+ if not hasattr(val, 'deviceChange'):
+ return
+
+ if hasattr(val, 'extraConfig'):
+ # there are 2 cases - new entry or update an existing one
+ for extra in val.extraConfig:
+ self._update_extra_config(extra)
+
+ if len(val.deviceChange) < 2:
+ return
+
+ # Case of Reconfig of VM to attach disk
+ controller_key = val.deviceChange[0].device.controllerKey
+ filename = val.deviceChange[0].device.backing.fileName
+
+ disk = VirtualDisk()
+ disk.controllerKey = controller_key
+
+ disk_backing = VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ disk_backing.key = -101
+ disk.backing = disk_backing
+
+ controller = VirtualLsiLogicController()
+ controller.key = controller_key
+
+ devices = _create_array_of_type('VirtualDevice')
+ devices.VirtualDevice = [disk, controller, self.device[0]]
+ self.set("config.hardware.device", devices)
+ except AttributeError:
+ pass
+
+
+class Network(ManagedObject):
+ """Network class."""
+
+ def __init__(self):
+ super(Network, self).__init__("network")
+ self.set("summary.name", "vmnet0")
+
+
+class ResourcePool(ManagedObject):
+ """Resource Pool class."""
+
+ def __init__(self, name="test_ResPool", value="resgroup-test"):
+ super(ResourcePool, self).__init__("rp")
+ self.set("name", name)
+ summary = DataObject()
+ runtime = DataObject()
+ config = DataObject()
+ memory = DataObject()
+ cpu = DataObject()
+
+ memoryAllocation = DataObject()
+ cpuAllocation = DataObject()
+ vm_list = DataObject()
+
+ memory.maxUsage = 1000 * units.Mi
+ memory.overallUsage = 500 * units.Mi
+ cpu.maxUsage = 10000
+ cpu.overallUsage = 1000
+ runtime.cpu = cpu
+ runtime.memory = memory
+ summary.runtime = runtime
+ cpuAllocation.limit = 10000
+ memoryAllocation.limit = 1024
+ memoryAllocation.reservation = 1024
+ config.memoryAllocation = memoryAllocation
+ config.cpuAllocation = cpuAllocation
+ vm_list.ManagedObjectReference = []
+ self.set("summary", summary)
+ self.set("summary.runtime.memory", memory)
+ self.set("config", config)
+ self.set("vm", vm_list)
+ parent = ManagedObjectReference(value=value,
+ name=name)
+ owner = ManagedObjectReference(value=value,
+ name=name)
+ self.set("parent", parent)
+ self.set("owner", owner)
+
+
+class DatastoreHostMount(DataObject):
+ def __init__(self, value='host-100'):
+ super(DatastoreHostMount, self).__init__()
+ host_ref = (_db_content["HostSystem"]
+ [_db_content["HostSystem"].keys()[0]].obj)
+ host_system = DataObject()
+ host_system.ManagedObjectReference = [host_ref]
+ host_system.value = value
+ self.key = host_system
+
+
+class ClusterComputeResource(ManagedObject):
+ """Cluster class."""
+
+ def __init__(self, name="test_cluster"):
+ super(ClusterComputeResource, self).__init__("domain")
+ self.set("name", name)
+ self.set("host", None)
+ self.set("datastore", None)
+ self.set("resourcePool", None)
+
+ summary = DataObject()
+ summary.numHosts = 0
+ summary.numCpuCores = 0
+ summary.numCpuThreads = 0
+ summary.numEffectiveHosts = 0
+ summary.totalMemory = 0
+ summary.effectiveMemory = 0
+ summary.effectiveCpu = 10000
+ self.set("summary", summary)
+
+ def _add_root_resource_pool(self, r_pool):
+ if r_pool:
+ self.set("resourcePool", r_pool)
+
+ def _add_host(self, host_sys):
+ if host_sys:
+ hosts = self.get("host")
+ if hosts is None:
+ hosts = DataObject()
+ hosts.ManagedObjectReference = []
+ self.set("host", hosts)
+ hosts.ManagedObjectReference.append(host_sys)
+ # Update summary every time a new host is added
+ self._update_summary()
+
+ def _add_datastore(self, datastore):
+ if datastore:
+ datastores = self.get("datastore")
+ if datastores is None:
+ datastores = DataObject()
+ datastores.ManagedObjectReference = []
+ self.set("datastore", datastores)
+ datastores.ManagedObjectReference.append(datastore)
+
+ # Method to update summary of a cluster upon host addition
+ def _update_summary(self):
+ summary = self.get("summary")
+ summary.numHosts = 0
+ summary.numCpuCores = 0
+ summary.numCpuThreads = 0
+ summary.numEffectiveHosts = 0
+ summary.totalMemory = 0
+ summary.effectiveMemory = 0
+
+ hosts = self.get("host")
+ # Compute the aggregate stats
+ summary.numHosts = len(hosts.ManagedObjectReference)
+ for host_ref in hosts.ManagedObjectReference:
+ host_sys = _get_object(host_ref)
+ connected = host_sys.get("connected")
+ host_summary = host_sys.get("summary")
+ summary.numCpuCores += host_summary.hardware.numCpuCores
+ summary.numCpuThreads += host_summary.hardware.numCpuThreads
+ summary.totalMemory += host_summary.hardware.memorySize
+ free_memory = (host_summary.hardware.memorySize / units.Mi
+ - host_summary.quickStats.overallMemoryUsage)
+ summary.effectiveMemory += free_memory if connected else 0
+ summary.numEffectiveHosts += 1 if connected else 0
+ self.set("summary", summary)
+
+
+class Datastore(ManagedObject):
+ """Datastore class."""
+
+ def __init__(self, name="fake-ds", capacity=1024, free=500,
+ accessible=True, maintenance_mode="normal"):
+ super(Datastore, self).__init__("ds")
+ self.set("summary.type", "VMFS")
+ self.set("summary.name", name)
+ self.set("summary.capacity", capacity * units.Gi)
+ self.set("summary.freeSpace", free * units.Gi)
+ self.set("summary.accessible", accessible)
+ self.set("summary.maintenanceMode", maintenance_mode)
+ self.set("browser", "")
+
+
+class HostNetworkSystem(ManagedObject):
+ """HostNetworkSystem class."""
+
+ def __init__(self, name="networkSystem"):
+ super(HostNetworkSystem, self).__init__("ns")
+ self.set("name", name)
+
+ pnic_do = DataObject()
+ pnic_do.device = "vmnic0"
+
+ net_info_pnic = DataObject()
+ net_info_pnic.PhysicalNic = [pnic_do]
+
+ self.set("networkInfo.pnic", net_info_pnic)
+
+
+class HostStorageSystem(ManagedObject):
+ """HostStorageSystem class."""
+
+ def __init__(self):
+ super(HostStorageSystem, self).__init__("storageSystem")
+
+
+class HostSystem(ManagedObject):
+ """Host System class."""
+
+ def __init__(self, name="ha-host", connected=True, ds_ref=None,
+ maintenance_mode=False):
+ super(HostSystem, self).__init__("host")
+ self.set("name", name)
+ if _db_content.get("HostNetworkSystem", None) is None:
+ create_host_network_system()
+ if not _get_object_refs('HostStorageSystem'):
+ create_host_storage_system()
+ host_net_key = _db_content["HostNetworkSystem"].keys()[0]
+ host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
+ self.set("configManager.networkSystem", host_net_sys)
+ host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
+ self.set("configManager.storageSystem", host_storage_sys_key)
+
+ if not ds_ref:
+ ds_ref = create_datastore('local-host-%s' % name, 500, 500)
+ datastores = DataObject()
+ datastores.ManagedObjectReference = [ds_ref]
+ self.set("datastore", datastores)
+
+ summary = DataObject()
+ hardware = DataObject()
+ hardware.numCpuCores = 8
+ hardware.numCpuPkgs = 2
+ hardware.numCpuThreads = 16
+ hardware.vendor = "Intel"
+ hardware.cpuModel = "Intel(R) Xeon(R)"
+ hardware.uuid = "host-uuid"
+ hardware.memorySize = units.Gi
+ summary.hardware = hardware
+
+ runtime = DataObject()
+ if connected:
+ runtime.connectionState = "connected"
+ else:
+ runtime.connectionState = "disconnected"
+
+ runtime.inMaintenanceMode = maintenance_mode
+
+ summary.runtime = runtime
+
+ quickstats = DataObject()
+ quickstats.overallMemoryUsage = 500
+ summary.quickStats = quickstats
+
+ product = DataObject()
+ product.name = "VMware ESXi"
+ product.version = "5.0.0"
+ config = DataObject()
+ config.product = product
+ summary.config = config
+
+ pnic_do = DataObject()
+ pnic_do.device = "vmnic0"
+ net_info_pnic = DataObject()
+ net_info_pnic.PhysicalNic = [pnic_do]
+
+ self.set("summary", summary)
+ self.set("capability.maxHostSupportedVcpus", 600)
+ self.set("summary.hardware", hardware)
+ self.set("summary.runtime", runtime)
+ self.set("config.network.pnic", net_info_pnic)
+ self.set("connected", connected)
+
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = "vSwitch0"
+ vswitch_do.portgroup = ["PortGroup-vmnet0"]
+
+        net_switch = DataObject()
+        net_switch.HostVirtualSwitch = [vswitch_do]
+        self.set("config.network.vswitch", net_switch)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-vmnet0"
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = 0
+ pg_spec.name = "vmnet0"
+
+ host_pg_do.spec = pg_spec
+
+ host_pg = DataObject()
+ host_pg.HostPortGroup = [host_pg_do]
+ self.set("config.network.portgroup", host_pg)
+
+ config = DataObject()
+ storageDevice = DataObject()
+
+ iscsi_hba = HostInternetScsiHba()
+ iscsi_hba.iScsiName = "iscsi-name"
+ host_bus_adapter_array = DataObject()
+ host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
+ storageDevice.hostBusAdapter = host_bus_adapter_array
+ config.storageDevice = storageDevice
+ self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
+
+ # Set the same on the storage system managed object
+ host_storage_sys = _get_object(host_storage_sys_key)
+ host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
+ host_bus_adapter_array)
+
+ def _add_iscsi_target(self, data):
+ default_lun = DataObject()
+ default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
+ default_lun.key = 'key-vim.host.ScsiDisk-010'
+ default_lun.deviceName = 'fake-device'
+ default_lun.uuid = 'fake-uuid'
+ scsi_lun_array = DataObject()
+ scsi_lun_array.ScsiLun = [default_lun]
+ self.set("config.storageDevice.scsiLun", scsi_lun_array)
+
+ transport = DataObject()
+ transport.address = [data['target_portal']]
+ transport.iScsiName = data['target_iqn']
+ default_target = DataObject()
+ default_target.lun = [default_lun]
+ default_target.transport = transport
+
+ iscsi_adapter = DataObject()
+ iscsi_adapter.adapter = 'key-vmhba33'
+ iscsi_adapter.transport = transport
+ iscsi_adapter.target = [default_target]
+ iscsi_topology = DataObject()
+ iscsi_topology.adapter = [iscsi_adapter]
+ self.set("config.storageDevice.scsiTopology", iscsi_topology)
+
+ def _add_port_group(self, spec):
+ """Adds a port group to the host system object in the db."""
+ pg_name = spec.name
+ vswitch_name = spec.vswitchName
+ vlanid = spec.vlanId
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = vswitch_name
+ vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
+
+ vswitches = self.get("config.network.vswitch").HostVirtualSwitch
+ vswitches.append(vswitch_do)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-%s" % pg_name
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = vlanid
+ pg_spec.name = pg_name
+
+ host_pg_do.spec = pg_spec
+ host_pgrps = self.get("config.network.portgroup").HostPortGroup
+ host_pgrps.append(host_pg_do)
+
+
+class Datacenter(ManagedObject):
+ """Datacenter class."""
+
+ def __init__(self, name="ha-datacenter", ds_ref=None):
+ super(Datacenter, self).__init__("dc")
+ self.set("name", name)
+ self.set("vmFolder", "vm_folder_ref")
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+ if ds_ref:
+ datastore = DataObject()
+ datastore.ManagedObjectReference = [ds_ref]
+ else:
+ datastore = None
+ self.set("datastore", datastore)
+
+
+class Task(ManagedObject):
+ """Task class."""
+
+ def __init__(self, task_name, state="running", result=None,
+ error_fault=None):
+ super(Task, self).__init__("Task")
+ info = DataObject()
+ info.name = task_name
+ info.state = state
+ if state == 'error':
+ error = DataObject()
+ error.localizedMessage = "Error message"
+ if not error_fault:
+ error.fault = DataObject()
+ else:
+ error.fault = error_fault
+ info.error = error
+ info.result = result
+ self.set("info", info)
+
+
+def create_host_network_system():
+ host_net_system = HostNetworkSystem()
+ _create_object("HostNetworkSystem", host_net_system)
+
+
+def create_host_storage_system():
+ host_storage_system = HostStorageSystem()
+ _create_object("HostStorageSystem", host_storage_system)
+
+
+def create_host(ds_ref=None):
+ host_system = HostSystem(ds_ref=ds_ref)
+ _create_object('HostSystem', host_system)
+
+
+def create_datacenter(name, ds_ref=None):
+ data_center = Datacenter(name, ds_ref)
+ _create_object('Datacenter', data_center)
+
+
+def create_datastore(name, capacity, free):
+ data_store = Datastore(name, capacity, free)
+ _create_object('Datastore', data_store)
+ return data_store.obj
+
+
+def create_res_pool():
+ res_pool = ResourcePool()
+ _create_object('ResourcePool', res_pool)
+ return res_pool.obj
+
+
+def create_network():
+ network = Network()
+ _create_object('Network', network)
+
+
+def create_cluster(name, ds_ref):
+ cluster = ClusterComputeResource(name=name)
+ cluster._add_host(_get_object_refs("HostSystem")[0])
+ cluster._add_host(_get_object_refs("HostSystem")[1])
+ cluster._add_datastore(ds_ref)
+ cluster._add_root_resource_pool(create_res_pool())
+ _create_object('ClusterComputeResource', cluster)
+
+
+def create_vm(uuid=None, name=None,
+ cpus=1, memory=128, devices=None,
+ vmPathName=None, extraConfig=None,
+ res_pool_ref=None, host_ref=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+
+ if name is None:
+ name = uuid
+
+ if devices is None:
+ devices = []
+
+ if vmPathName is None:
+ vm_path = ds_util.DatastorePath(_db_content['Datastore'].values()[0])
+ else:
+ vm_path = ds_util.DatastorePath.parse(vmPathName)
+
+ if res_pool_ref is None:
+ res_pool_ref = _db_content['ResourcePool'].keys()[0]
+
+ if host_ref is None:
+ host_ref = _db_content["HostSystem"].keys()[0]
+
+ # Fill in the default path to the vmx file if we were only given a
+ # datastore. Note that if you create a VM with vmPathName '[foo]', when you
+ # retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use
+ # vm_path below for the stored value of vmPathName.
+ if vm_path.rel_path == '':
+ vm_path = vm_path.join(name, name + '.vmx')
+
+ for key, value in _db_content["Datastore"].iteritems():
+ if value.get('summary.name') == vm_path.datastore:
+ ds = key
+ break
+ else:
+ ds = create_datastore(vm_path.datastore, 1024, 500)
+
+ vm_dict = {"name": name,
+ "ds": [ds],
+ "runtime_host": host_ref,
+ "powerstate": "poweredOff",
+ "vmPathName": str(vm_path),
+ "numCpu": cpus,
+ "mem": memory,
+ "extra_config": extraConfig,
+ "virtual_device": devices,
+ "instanceUuid": uuid}
+ vm = VirtualMachine(**vm_dict)
+ _create_object("VirtualMachine", vm)
+
+ res_pool = _get_object(res_pool_ref)
+ res_pool.vm.ManagedObjectReference.append(vm.obj)
+
+ return vm.obj
+
+
+def create_task(task_name, state="running", result=None, error_fault=None):
+ task = Task(task_name, state, result, error_fault)
+ _create_object("Task", task)
+ return task
+
+
+def _add_file(file_path):
+ """Adds a file reference to the db."""
+ _db_content["files"].append(file_path)
+
+
+def _remove_file(file_path):
+ """Removes a file reference from the db."""
+ # Check if the remove is for a single file object or for a folder
+ if file_path.find(".vmdk") != -1:
+ if file_path not in _db_content.get("files"):
+ raise vexc.FileNotFoundException(file_path)
+ _db_content.get("files").remove(file_path)
+ else:
+ # Removes the files in the folder and the folder too from the db
+ to_delete = set()
+ for file in _db_content.get("files"):
+ if file.find(file_path) != -1:
+ to_delete.add(file)
+ for file in to_delete:
+ _db_content.get("files").remove(file)
+
+
+def fake_plug_vifs(*args, **kwargs):
+ """Fakes plugging vifs."""
+ pass
+
+
+def fake_get_network(*args, **kwargs):
+ """Fake get network."""
+ return {'type': 'fake'}
+
+
+def get_file(file_path):
+ """Check if file exists in the db."""
+ return file_path in _db_content.get("files")
+
+
+def fake_upload_image(context, image, instance, **kwargs):
+ """Fakes the upload of an image."""
+ pass
+
+
+def fake_fetch_image(context, instance, host, dc_name, ds_name, file_path,
+ cookies=None):
+ """Fakes the fetch of an image."""
+ ds_file_path = "[" + ds_name + "] " + file_path
+ _add_file(ds_file_path)
+
+
+def _get_vm_mdo(vm_ref):
+ """Gets the Virtual Machine with the ref from the db."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_("There is no VM registered"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ return _db_content.get("VirtualMachine")[vm_ref]
+
+
+def _merge_extraconfig(existing, changes):
+ """Imposes the changes in extraConfig over the existing extraConfig."""
+ existing = existing or []
+ if (changes):
+ for c in changes:
+ if len([x for x in existing if x.key == c.key]) > 0:
+ extraConf = [x for x in existing if x.key == c.key][0]
+ extraConf.value = c.value
+ else:
+ existing.append(c)
+ return existing
+
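+# Illustrative behaviour (a sketch, not part of the original module):
+#
+#     merged = _merge_extraconfig([OptionValue('k1', 'old')],
+#                                 [OptionValue('k1', 'new'),
+#                                  OptionValue('k2', 'x')])
+#
+# 'k1' is overwritten with 'new' and the unknown key 'k2' is appended.
+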
+
+class FakeFactory(object):
+ """Fake factory class for the suds client."""
+
+ def create(self, obj_name):
+ """Creates a namespace object."""
+ return DataObject(obj_name)
+
+
+class FakeService(DataObject):
+ """Fake service class."""
+
+ def Logout(self, session_manager):
+ pass
+
+ def FindExtension(self, extension_manager, key):
+ return []
+
+
+class FakeClient(DataObject):
+ """Fake client class."""
+
+ def __init__(self):
+ """Creates a namespace object."""
+ self.service = FakeService()
+
+
+class FakeSession(object):
+ """Fake Session Class."""
+
+ def __init__(self):
+ self.vim = FakeVim()
+
+ def _call_method(self, module, method, *args, **kwargs):
+ raise NotImplementedError()
+
+ def _wait_for_task(self, task_ref):
+ raise NotImplementedError()
+
+
+class FakeObjectRetrievalSession(FakeSession):
+ """A session for faking object retrieval tasks.
+
+ _call_method() returns a given set of objects
+ sequentially, regardless of the method called.
+ """
+
+ def __init__(self, *ret):
+ super(FakeObjectRetrievalSession, self).__init__()
+ self.ret = ret
+ self.ind = 0
+
+ def _call_method(self, module, method, *args, **kwargs):
+ # return fake objects in a circular manner
+ self.ind = (self.ind + 1) % len(self.ret)
+ return self.ret[self.ind - 1]
+
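+# Illustrative usage (a sketch, not part of the original module): the session
+# above cycles through the objects it was constructed with, regardless of the
+# module or method name passed in:
+#
+#     session = FakeObjectRetrievalSession('a', 'b')
+#     session._call_method(None, 'AnyMethod')  # -> 'a'
+#     session._call_method(None, 'AnyMethod')  # -> 'b'
+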
+
+def get_fake_vim_object(vmware_api_session):
+ key = vmware_api_session.__repr__()
+ if key not in _vim_map:
+ _vim_map[key] = FakeVim()
+ return _vim_map[key]
+
+
+class FakeVim(object):
+ """Fake VIM Class."""
+
+ def __init__(self, protocol="https", host="localhost", trace=None):
+ """Initializes the suds client object, sets the service content
+ contents and the cookies for the session.
+ """
+ self._session = None
+ self.client = FakeClient()
+ self.client.factory = FakeFactory()
+
+ transport = DataObject()
+ transport.cookiejar = "Fake-CookieJar"
+ options = DataObject()
+ options.transport = transport
+
+ self.client.options = options
+
+ service_content = self.client.factory.create('ns0:ServiceContent')
+ service_content.propertyCollector = "PropCollector"
+ service_content.virtualDiskManager = "VirtualDiskManager"
+ service_content.fileManager = "FileManager"
+ service_content.rootFolder = "RootFolder"
+ service_content.sessionManager = "SessionManager"
+ service_content.extensionManager = "ExtensionManager"
+ service_content.searchIndex = "SearchIndex"
+
+ about_info = DataObject()
+ about_info.name = "VMware vCenter Server"
+ about_info.version = "5.1.0"
+ service_content.about = about_info
+
+ self._service_content = service_content
+
+ @property
+ def service_content(self):
+ return self._service_content
+
+ def __repr__(self):
+ return "Fake VIM Object"
+
+ def __str__(self):
+ return "Fake VIM Object"
+
+ def _login(self):
+ """Logs in and sets the session object in the db."""
+ self._session = uuidutils.generate_uuid()
+ session = DataObject()
+ session.key = self._session
+ session.userName = 'sessionUserName'
+ _db_content['session'][self._session] = session
+ return session
+
+ def _terminate_session(self, *args, **kwargs):
+ """Terminates a session."""
+ s = kwargs.get("sessionId")[0]
+ if s not in _db_content['session']:
+ return
+ del _db_content['session'][s]
+
+ def _check_session(self):
+ """Checks if the session is active."""
+ if (self._session is None or self._session not in
+ _db_content['session']):
+ LOG.debug("Session is faulty")
+ raise vexc.VimFaultException(
+ [vexc.NOT_AUTHENTICATED],
+ _("Session Invalid"))
+
+ def _session_is_active(self, *args, **kwargs):
+ try:
+ self._check_session()
+ return True
+ except Exception:
+ return False
+
+ def _create_vm(self, method, *args, **kwargs):
+ """Creates and registers a VM object with the Host System."""
+ config_spec = kwargs.get("config")
+
+ if config_spec.guestId not in constants.VALID_OS_TYPES:
+ ex = vexc.VMwareDriverException('A specified parameter was '
+ 'not correct.')
+ return create_task(method, "error", error_fault=ex).obj
+
+ pool = kwargs.get('pool')
+
+ devices = []
+ for device_change in config_spec.deviceChange:
+ if device_change.operation == 'add':
+ devices.append(device_change.device)
+
+ vm_ref = create_vm(config_spec.instanceUuid, config_spec.name,
+ config_spec.numCPUs, config_spec.memoryMB,
+ devices, config_spec.files.vmPathName,
+ config_spec.extraConfig, pool)
+
+ task_mdo = create_task(method, "success", result=vm_ref)
+ return task_mdo.obj
+
+ def _reconfig_vm(self, method, *args, **kwargs):
+ """Reconfigures a VM and sets the properties supplied."""
+ vm_ref = args[0]
+ vm_mdo = _get_vm_mdo(vm_ref)
+ vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _create_copy_disk(self, method, vmdk_file_path):
+ """Creates/copies a vmdk file object in the datastore."""
+ # We need to add/create both .vmdk and .-flat.vmdk files
+ flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _add_file(vmdk_file_path)
+ _add_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _extend_disk(self, method, size):
+ """Extend disk size when create a instance."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _snapshot_vm(self, method):
+ """Snapshots a VM. Here we do nothing for faking sake."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _find_all_by_uuid(self, *args, **kwargs):
+ uuid = kwargs.get('uuid')
+ vm_refs = []
+ for vm_ref in _db_content.get("VirtualMachine"):
+ vm = _get_object(vm_ref)
+ vm_uuid = vm.get("summary.config.instanceUuid")
+ if vm_uuid == uuid:
+ vm_refs.append(vm_ref)
+ return vm_refs
+
+ def _delete_snapshot(self, method, *args, **kwargs):
+ """Deletes a VM snapshot. Here we do nothing for faking sake."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _delete_file(self, method, *args, **kwargs):
+ """Deletes a file from the datastore."""
+ _remove_file(kwargs.get("name"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _just_return(self):
+ """Fakes a return."""
+ return
+
+ def _just_return_task(self, method):
+ """Fakes a task return."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _clone_vm(self, method, *args, **kwargs):
+ """Fakes a VM clone."""
+ """Creates and registers a VM object with the Host System."""
+ source_vmref = args[0]
+ source_vm_mdo = _get_vm_mdo(source_vmref)
+ clone_spec = kwargs.get("spec")
+ vm_dict = {
+ "name": kwargs.get("name"),
+ "ds": source_vm_mdo.get("datastore"),
+ "runtime_host": source_vm_mdo.get("runtime.host"),
+ "powerstate": source_vm_mdo.get("runtime.powerState"),
+ "vmPathName": source_vm_mdo.get("config.files.vmPathName"),
+ "numCpu": source_vm_mdo.get("summary.config.numCpu"),
+ "mem": source_vm_mdo.get("summary.config.memorySizeMB"),
+ "extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
+ "virtual_device":
+ source_vm_mdo.get("config.hardware.device").VirtualDevice,
+ "instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}
+
+ if clone_spec.config is not None:
+ # Impose the config changes specified in the config property
+ if (hasattr(clone_spec.config, 'instanceUuid') and
+ clone_spec.config.instanceUuid is not None):
+ vm_dict["instanceUuid"] = clone_spec.config.instanceUuid
+
+ if hasattr(clone_spec.config, 'extraConfig'):
+ extraConfigs = _merge_extraconfig(vm_dict["extra_config"],
+ clone_spec.config.extraConfig)
+ vm_dict["extra_config"] = extraConfigs
+
+ virtual_machine = VirtualMachine(**vm_dict)
+ _create_object("VirtualMachine", virtual_machine)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _unregister_vm(self, method, *args, **kwargs):
+ """Unregisters a VM from the Host System."""
+ vm_ref = args[0]
+ _get_vm_mdo(vm_ref)
+ del _db_content["VirtualMachine"][vm_ref]
+
+ def _search_ds(self, method, *args, **kwargs):
+ """Searches the datastore for a file."""
+ # TODO(garyk): add support for spec parameter
+ ds_path = kwargs.get("datastorePath")
+ matched_files = set()
+ # Check if we are searching for a file or a directory
+ directory = False
+ dname = '%s/' % ds_path
+ for file in _db_content.get("files"):
+ if file == dname:
+ directory = True
+ break
+ # A directory search implies that we must return all
+ # subdirectories
+ if directory:
+ for file in _db_content.get("files"):
+ if file.find(ds_path) != -1:
+ if not file.endswith(ds_path):
+ path = file.lstrip(dname).split('/')
+ if path:
+ matched_files.add(path[0])
+ if not matched_files:
+ matched_files.add('/')
+ else:
+ for file in _db_content.get("files"):
+ if file.find(ds_path) != -1:
+ matched_files.add(ds_path)
+ if matched_files:
+ result = DataObject()
+ result.path = ds_path
+ result.file = []
+ for file in matched_files:
+ matched = DataObject()
+ matched.path = file
+ result.file.append(matched)
+ task_mdo = create_task(method, "success", result=result)
+ else:
+ task_mdo = create_task(method, "error", error_fault=FileNotFound())
+ return task_mdo.obj
+
+ def _move_file(self, method, *args, **kwargs):
+ source = kwargs.get('sourceName')
+ destination = kwargs.get('destinationName')
+ new_files = []
+ if source != destination:
+ for file in _db_content.get("files"):
+ if source in file:
+ new_file = file.replace(source, destination)
+ new_files.append(new_file)
+ # if source is not a file then the children will also
+ # be deleted
+ _remove_file(source)
+ for file in new_files:
+ _add_file(file)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def fake_transfer_file(self, ds_name, file_path):
+ """Fakes fetch image call.
+ Just adds a reference to the db for the file.
+ """
+ ds_file_path = "[" + ds_name + "] " + file_path
+ _add_file(ds_file_path)
+
+ def _make_dir(self, method, *args, **kwargs):
+ """Creates a directory in the datastore."""
+ ds_path = kwargs.get("name")
+ if get_file(ds_path):
+ raise vexc.FileAlreadyExistsException()
+ _db_content["files"].append('%s/' % ds_path)
+
+ def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
+ """Sets power state for the VM."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_("No Virtual Machine has been "
+ "registered yet"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
+ vm_mdo.set("runtime.powerState", pwr_state)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _retrieve_properties_continue(self, method, *args, **kwargs):
+ """Continues the retrieve."""
+ return FakeRetrieveResult()
+
+ def _retrieve_properties_cancel(self, method, *args, **kwargs):
+ """Cancels the retrieve."""
+ return None
+
+ def _retrieve_properties(self, method, *args, **kwargs):
+ """Retrieves properties based on the type."""
+ spec_set = kwargs.get("specSet")[0]
+ spec_type = spec_set.propSet[0].type
+ properties = spec_set.propSet[0].pathSet
+ if not isinstance(properties, list):
+ properties = properties.split()
+ objs = spec_set.objectSet
+ lst_ret_objs = FakeRetrieveResult()
+ for obj in objs:
+ try:
+ obj_ref = obj.obj
+ if obj_ref == "RootFolder":
+ # This means that we are retrieving props for all managed
+ # data objects of the specified 'type' in the entire
+ # inventory. This gets invoked by vim_util.get_objects.
+ mdo_refs = _db_content[spec_type]
+ elif obj_ref.type != spec_type:
+ # This means that we are retrieving props for the managed
+ # data objects in the parent object's 'path' property.
+ # This gets invoked by vim_util.get_inner_objects
+ # eg. obj_ref = <ManagedObjectReference of a cluster>
+ # type = 'DataStore'
+ # path = 'datastore'
+ # the above will retrieve all datastores in the given
+ # cluster.
+ parent_mdo = _db_content[obj_ref.type][obj_ref]
+ path = obj.selectSet[0].path
+ mdo_refs = parent_mdo.get(path).ManagedObjectReference
+ else:
+ # This means that we are retrieving props of the given
+ # managed data object. This gets invoked by
+ # vim_util.get_properties_for_a_collection_of_objects.
+ mdo_refs = [obj_ref]
+
+ for mdo_ref in mdo_refs:
+ mdo = _db_content[spec_type][mdo_ref]
+ prop_list = []
+ for prop_name in properties:
+ prop = Prop(prop_name, mdo.get(prop_name))
+ prop_list.append(prop)
+ obj_content = ObjectContent(mdo.obj, prop_list)
+ lst_ret_objs.add_object(obj_content)
+ except Exception as exc:
+ LOG.exception(exc)
+ continue
+ return lst_ret_objs
+
+ def _add_port_group(self, method, *args, **kwargs):
+ """Adds a port group to the host system."""
+ _host_sk = _db_content["HostSystem"].keys()[0]
+ host_mdo = _db_content["HostSystem"][_host_sk]
+ host_mdo._add_port_group(kwargs.get("portgrp"))
+
+ def _add_iscsi_send_tgt(self, method, *args, **kwargs):
+ """Adds an iSCSI send target to the HBA."""
+ send_targets = kwargs.get('targets')
+ host_storage_sys = _get_objects('HostStorageSystem').objects[0]
+ iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
+ '.hostBusAdapter')
+ iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
+ if hasattr(iscsi_hba, 'configuredSendTarget'):
+ iscsi_hba.configuredSendTarget.extend(send_targets)
+ else:
+ iscsi_hba.configuredSendTarget = send_targets
+
+ def __getattr__(self, attr_name):
+ if attr_name != "Login":
+ self._check_session()
+ if attr_name == "Login":
+ return lambda *args, **kwargs: self._login()
+ elif attr_name == "SessionIsActive":
+ return lambda *args, **kwargs: self._session_is_active(
+ *args, **kwargs)
+ elif attr_name == "TerminateSession":
+ return lambda *args, **kwargs: self._terminate_session(
+ *args, **kwargs)
+ elif attr_name == "CreateVM_Task":
+ return lambda *args, **kwargs: self._create_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "ReconfigVM_Task":
+ return lambda *args, **kwargs: self._reconfig_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "CreateVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("name"))
+ elif attr_name == "DeleteDatastoreFile_Task":
+ return lambda *args, **kwargs: self._delete_file(attr_name,
+ *args, **kwargs)
+ elif attr_name == "PowerOnVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "PowerOffVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOff")
+ elif attr_name == "RebootGuest":
+ return lambda *args, **kwargs: self._just_return()
+ elif attr_name == "ResetVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "SuspendVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "suspended")
+ elif attr_name == "CreateSnapshot_Task":
+ return lambda *args, **kwargs: self._snapshot_vm(attr_name)
+ elif attr_name == "RemoveSnapshot_Task":
+ return lambda *args, **kwargs: self._delete_snapshot(attr_name,
+ *args, **kwargs)
+ elif attr_name == "CopyVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("destName"))
+ elif attr_name == "ExtendVirtualDisk_Task":
+ return lambda *args, **kwargs: self._extend_disk(attr_name,
+ kwargs.get("size"))
+ elif attr_name == "Destroy_Task":
+ return lambda *args, **kwargs: self._unregister_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "UnregisterVM":
+ return lambda *args, **kwargs: self._unregister_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "CloneVM_Task":
+ return lambda *args, **kwargs: self._clone_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "FindAllByUuid":
+ return lambda *args, **kwargs: self._find_all_by_uuid(attr_name,
+ *args, **kwargs)
+ elif attr_name == "SearchDatastore_Task":
+ return lambda *args, **kwargs: self._search_ds(attr_name,
+ *args, **kwargs)
+ elif attr_name == "MoveDatastoreFile_Task":
+ return lambda *args, **kwargs: self._move_file(attr_name,
+ *args, **kwargs)
+ elif attr_name == "MakeDirectory":
+ return lambda *args, **kwargs: self._make_dir(attr_name,
+ *args, **kwargs)
+ elif attr_name == "RetrievePropertiesEx":
+ return lambda *args, **kwargs: self._retrieve_properties(
+ attr_name, *args, **kwargs)
+ elif attr_name == "ContinueRetrievePropertiesEx":
+ return lambda *args, **kwargs: self._retrieve_properties_continue(
+ attr_name, *args, **kwargs)
+ elif attr_name == "CancelRetrievePropertiesEx":
+ return lambda *args, **kwargs: self._retrieve_properties_cancel(
+ attr_name, *args, **kwargs)
+ elif attr_name == "AddPortGroup":
+ return lambda *args, **kwargs: self._add_port_group(attr_name,
+ *args, **kwargs)
+ elif attr_name == "RebootHost_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "ShutdownHost_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "PowerUpHostFromStandBy_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "EnterMaintenanceMode_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "ExitMaintenanceMode_Task":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
+ elif attr_name == "AddInternetScsiSendTargets":
+ return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name,
+ *args, **kwargs)
+ elif attr_name == "RescanHba":
+ return lambda *args, **kwargs: self._just_return_task(attr_name)
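+
+
+if __name__ == '__main__':
+ # Illustrative sketch only (not used by the test suite): FakeVim's
+ # __getattr__ above maps vSphere API method names onto the fake
+ # handlers, so an API call returns a fake task managed object
+ # reference. This assumes reset() has populated _db_content, as the
+ # unit tests' setUp() does.
+ reset()
+ _vim = FakeVim()
+ _vim.Login(None, userName='fake', password='fake')
+ # RebootHost_Task is dispatched to _just_return_task() above.
+ _task_ref = _vim.RebootHost_Task('fake-host-ref')
+ assert _task_ref is not None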
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
new file mode 100644
index 0000000000..d126b36e0f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+import contextlib
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import db
+from nova.tests.unit import test_flavors
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import network_util
+
+
+def fake_get_vim_object(arg):
+ """Stubs out the VMwareAPISession's get_vim_object method."""
+ return fake.FakeVim()
+
+
+@property
+def fake_vim_prop(arg):
+ """Stubs out the VMwareAPISession's vim property access method."""
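+ # Note (illustrative): since this function is wrapped in @property,
+ # stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop) in
+ # set_stubs() below installs it as a property on the session class,
+ # so session.vim invokes it with the session instance as 'arg'.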
+ return fake.get_fake_vim_object(arg)
+
+
+def fake_is_vim_object(arg, module):
+ """Stubs out the VMwareAPISession's is_vim_object method."""
+ return isinstance(module, fake.FakeVim)
+
+
+def fake_temp_method_exception():
+ raise vexc.VimFaultException(
+ [vexc.NOT_AUTHENTICATED],
+ "Session Empty/Not Authenticated")
+
+
+def fake_temp_session_exception():
+ raise vexc.VimConnectionException("it's a fake!",
+ "Session Exception")
+
+
+def fake_session_file_exception():
+ fault_list = [vexc.FILE_ALREADY_EXISTS]
+ raise vexc.VimFaultException(fault_list,
+ Exception('fake'))
+
+
+def fake_session_permission_exception():
+ fault_list = [vexc.NO_PERMISSION]
+ fault_string = 'Permission to perform this operation was denied.'
+ details = {'privilegeId': 'Resource.AssignVMToPool', 'object': 'domain-c7'}
+ raise vexc.VimFaultException(fault_list, fault_string, details=details)
+
+
+def _fake_flavor_get(context, id):
+ for instance_type in test_flavors.DEFAULT_FLAVORS:
+ if instance_type['id'] == id:
+ return instance_type
+ return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+ 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+ 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {},
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+ 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
+
+
+def set_stubs(stubs):
+ """Set the stubs."""
+ stubs.Set(network_util, 'get_network_with_the_name',
+ fake.fake_get_network)
+ stubs.Set(images, 'upload_image', fake.fake_upload_image)
+ stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
+ stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop)
+ stubs.Set(driver.VMwareAPISession, "_is_vim_object",
+ fake_is_vim_object)
+ stubs.Set(db, 'flavor_get', _fake_flavor_get)
+
+
+def fake_suds_context(calls=None):
+ """Generate a suds client which automatically mocks all SOAP method calls.
+
+ Calls are stored in <calls>, indexed by the name of the call. If you need
+ to mock the behaviour of specific API calls you can pre-populate <calls>
+ with appropriate Mock objects.
+ """
+
+ calls = calls or {}
+
+ class fake_factory:
+ def create(self, name):
+ return mock.NonCallableMagicMock(name=name)
+
+ class fake_service:
+ def __getattr__(self, attr_name):
+ if attr_name in calls:
+ return calls[attr_name]
+
+ mock_call = mock.MagicMock(name=attr_name)
+ calls[attr_name] = mock_call
+ return mock_call
+
+ class fake_client:
+ def __init__(self, wsdl_url, **kwargs):
+ self.service = fake_service()
+ self.factory = fake_factory()
+
+ return contextlib.nested(
+ mock.patch('suds.client.Client', fake_client),
+
+ # As we're not connecting to a real host there's no need to wait
+ # between retries
+ mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
+ )
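+
+
+if __name__ == '__main__':
+ # Illustrative usage sketch only (not part of the test suite): any SOAP
+ # method looked up on the fake client's service is auto-mocked, while a
+ # pre-populated entry in 'calls' controls the methods you care about.
+ calls = {'RetrieveServiceContent': mock.MagicMock(
+ return_value='fake-service-content')}
+ with fake_suds_context(calls):
+ import suds.client
+ client = suds.client.Client('http://fake-host/fake.wsdl')
+ assert client.service.RetrieveServiceContent() == 'fake-service-content'
+ # Unknown calls are created on demand and recorded in 'calls'.
+ client.service.Logout('fake-session-manager')
+ assert calls['Logout'].called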
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
new file mode 100644
index 0000000000..7b4b1bba1f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -0,0 +1,168 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+import mox
+
+from nova import context
+from nova.image import glance
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
+class ConfigDriveTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
+ def setUp(self, mock_register):
+ super(ConfigDriveTestCase, self).setUp()
+ vm_util.vm_refs_cache_reset()
+ self.context = context.RequestContext('fake', 'fake', is_admin=False)
+ cluster_name = 'test_cluster'
+ self.flags(cluster_name=[cluster_name],
+ host_ip='test_url',
+ host_username='test_username',
+ host_password='test_pass',
+ use_linked_clone=False, group='vmware')
+ self.flags(vnc_enabled=False)
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
+ self.network_info = utils.get_test_network_info()
+ self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
+ cluster_name)
+ image_ref = nova.tests.unit.image.fake.get_valid_image_id()
+ instance_values = {
+ 'vm_state': 'building',
+ 'project_id': 'fake',
+ 'user_id': 'fake',
+ 'name': '1',
+ 'kernel_id': '1',
+ 'ramdisk_id': '1',
+ 'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
+ 'memory_mb': 8192,
+ 'flavor': 'm1.large',
+ 'instance_type_id': 0,
+ 'vcpus': 4,
+ 'root_gb': 80,
+ 'image_ref': image_ref,
+ 'host': 'fake_host',
+ 'task_state': 'scheduling',
+ 'reservation_id': 'r-3t8muvr0',
+ 'id': 1,
+ 'uuid': 'fake-uuid',
+ 'node': self.node_name,
+ 'metadata': [],
+ 'expected_attrs': ['system_metadata'],
+ }
+ self.test_instance = fake_instance.fake_instance_obj(self.context,
+ **instance_values)
+
+ (image_service, image_id) = glance.get_remote_image_service(
+ self.context, image_ref)
+ metadata = image_service.show(self.context, image_id)
+ self.image = {
+ 'id': image_ref,
+ 'disk_format': 'vmdk',
+ 'size': int(metadata['size']),
+ }
+
+ class FakeInstanceMetadata(object):
+ def __init__(self, instance, content=None, extra_md=None):
+ pass
+
+ def metadata_for_config_drive(self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ def fake_make_drive(_self, _path):
+ pass
+ # We can't actually make a config drive v2 because ensure_tree has
+ # been faked out
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
+ 'make_drive', fake_make_drive)
+
+ def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
+ pass
+ self.stubs.Set(images,
+ 'upload_iso_to_datastore',
+ fake_upload_iso_to_datastore)
+
+ def tearDown(self):
+ super(ConfigDriveTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def _spawn_vm(self, injected_files=None, admin_password=None,
+ block_device_info=None):
+
+ injected_files = injected_files or []
+ self.conn.spawn(self.context, self.test_instance, self.image,
+ injected_files=injected_files,
+ admin_password=admin_password,
+ network_info=self.network_info,
+ block_device_info=block_device_info)
+
+ def test_create_vm_with_config_drive_verify_method_invocation(self):
+ self.test_instance.config_drive = 'True'
+ self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
+ self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ self.conn._vmops._create_config_drive(self.test_instance,
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()
+ ).AndReturn('[ds1] fake.iso')
+ self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ # If spawn does not call _create_config_drive or
+ # _attach_cdrom_to_vm with the correct set of parameters,
+ # then mox's VerifyAll will raise an "Expected methods never
+ # called" error.
+ self._spawn_vm()
+
+ def test_create_vm_without_config_drive(self):
+ self.test_instance.config_drive = None
+ self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
+ self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ self.mox.ReplayAll()
+ # If spawn ends up calling _create_config_drive or
+ # _attach_cdrom_to_vm, then mox will report an "Unexpected
+ # method call" error.
+ self._spawn_vm()
+
+ def test_create_vm_with_config_drive(self):
+ self.test_instance.config_drive = 'True'
+ self._spawn_vm()
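+
+
+if __name__ == '__main__':
+ # Minimal sketch (illustrative only, never run as part of the tests) of
+ # the mox record/replay/verify cycle that the assertions above rely on.
+ class _Widget(object):
+ def ping(self):
+ return 'real'
+
+ _m = mox.Mox()
+ _w = _Widget()
+ _m.StubOutWithMock(_w, 'ping')
+ _w.ping().AndReturn('stubbed') # record the expected call
+ _m.ReplayAll() # switch from record to replay mode
+ assert _w.ping() == 'stubbed' # the expected call is made
+ _m.VerifyAll() # raises if an expectation was not met
+ _m.UnsetStubs()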
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
new file mode 100644
index 0000000000..5f7eb76a62
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -0,0 +1,2650 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMwareAPI.
+"""
+
+import collections
+import contextlib
+import copy
+import datetime
+
+from eventlet import greenthread
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from oslo.vmware import pbm
+from oslo.vmware import vim
+from oslo.vmware import vim_util as oslo_vim_util
+import suds
+
+from nova import block_device
+from nova.compute import api as compute_api
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import exception
+from nova.image import glance
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit import test_flavors
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova import utils as nova_utils
+from nova.virt import driver as v_driver
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+from nova.virt.vmwareapi import volumeops
+
+CONF = cfg.CONF
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('remove_unused_original_minimum_age_seconds',
+ 'nova.virt.imagecache')
+
+
+class fake_vm_ref(object):
+ def __init__(self):
+ self.value = 4
+ self._type = 'VirtualMachine'
+
+
+class fake_service_content(object):
+ def __init__(self):
+ self.ServiceContent = vmwareapi_fake.DataObject()
+ self.ServiceContent.fake = 'fake'
+
+
+class VMwareSudsTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareSudsTest, self).setUp()
+
+ def new_client_init(self, url, **kwargs):
+ return
+
+ mock.patch.object(suds.client.Client,
+ '__init__', new=new_client_init).start()
+ self.vim = self._vim_create()
+ self.addCleanup(mock.patch.stopall)
+
+ def _mock_getattr(self, attr_name):
+ self.assertEqual("RetrieveServiceContent", attr_name)
+ return lambda obj, **kwargs: fake_service_content()
+
+ def _vim_create(self):
+ with mock.patch.object(vim.Vim, '__getattr__', self._mock_getattr):
+ return vim.Vim()
+
+ def test_exception_with_deepcopy(self):
+ self.assertIsNotNone(self.vim)
+ self.assertRaises(vexc.VimException,
+ copy.deepcopy, self.vim)
+
+
+def _fake_create_session(inst):
+ session = vmwareapi_fake.DataObject()
+ session.key = 'fake_key'
+ session.userName = 'fake_username'
+ session._pbm_wsdl_loc = None
+ session._pbm = None
+ inst._session = session
+
+
+class VMwareDriverStartupTestCase(test.NoDBTestCase):
+ def _start_driver_with_flags(self, expected_exception_type, startup_flags):
+ self.flags(**startup_flags)
+ with mock.patch(
+ 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
+ e = self.assertRaises(
+ Exception, driver.VMwareVCDriver, None) # noqa
+ self.assertIs(type(e), expected_exception_type)
+
+ def test_start_driver_no_user(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_ip='ip', host_password='password',
+ group='vmware'))
+
+ def test_start_driver_no_host(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_username='username', host_password='password',
+ group='vmware'))
+
+ def test_start_driver_no_password(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_ip='ip', host_username='username',
+ group='vmware'))
+
+ def test_start_driver_with_user_host_password(self):
+ # Getting an InvalidInput exception (triggered by the invalid
+ # datastore_regex) shows that no exception is raised about
+ # missing user/password/host settings
+ self._start_driver_with_flags(
+ nova.exception.InvalidInput,
+ dict(host_ip='ip', host_password='password',
+ host_username="user", datastore_regex="bad(regex",
+ group='vmware'))
+
+
+class VMwareSessionTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
+ return_value=False)
+ def test_call_method(self, mock_is_vim):
+ with contextlib.nested(
+ mock.patch.object(driver.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ session = driver.VMwareAPISession()
+ session._vim = mock.Mock()
+ module = mock.Mock()
+ session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira', session._vim)
+
+ @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_vim(self, mock_is_vim):
+ with contextlib.nested(
+ mock.patch.object(driver.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ session = driver.VMwareAPISession()
+ module = mock.Mock()
+ session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira')
+
+
+class VMwareAPIVMTestCase(test.NoDBTestCase):
+ """Unit tests for VMware API connection calls."""
+
+ REQUIRES_LOCKING = True
+
+ @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
+ def setUp(self, mock_register, create_connection=True):
+ super(VMwareAPIVMTestCase, self).setUp()
+ vm_util.vm_refs_cache_reset()
+ self.context = context.RequestContext('fake', 'fake', is_admin=False)
+ cluster_name = 'test_cluster'
+ cluster_name2 = 'test_cluster2'
+ self.flags(cluster_name=[cluster_name, cluster_name2],
+ host_ip='test_url',
+ host_username='test_username',
+ host_password='test_pass',
+ api_retry_count=1,
+ use_linked_clone=False, group='vmware')
+ self.flags(vnc_enabled=False,
+ image_cache_subdirectory_name='vmware_base',
+ my_ip='')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ stubs.set_stubs(self.stubs)
+ vmwareapi_fake.reset()
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ if create_connection:
+ self.conn = driver.VMwareVCDriver(None, False)
+ self._set_exception_vars()
+ self.node_name = self.conn._resources.keys()[0]
+ self.node_name2 = self.conn._resources.keys()[1]
+ if cluster_name2 in self.node_name2:
+ self.ds = 'ds1'
+ else:
+ self.ds = 'ds2'
+
+ self.vim = vmwareapi_fake.FakeVim()
+
+ # NOTE(vish): none of the network plugging code is actually
+ # being tested
+ self.network_info = utils.get_test_network_info()
+ image_ref = nova.tests.unit.image.fake.get_valid_image_id()
+ (image_service, image_id) = glance.get_remote_image_service(
+ self.context, image_ref)
+ metadata = image_service.show(self.context, image_id)
+ self.image = {
+ 'id': image_ref,
+ 'disk_format': 'vmdk',
+ 'size': int(metadata['size']),
+ }
+ self.fake_image_uuid = self.image['id']
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.vnc_host = 'ha-host'
+ self.instance_without_compute = {'node': None,
+ 'vm_state': 'building',
+ 'project_id': 'fake',
+ 'user_id': 'fake',
+ 'name': '1',
+ 'display_description': '1',
+ 'kernel_id': '1',
+ 'ramdisk_id': '1',
+ 'mac_addresses': [
+ {'address': 'de:ad:be:ef:be:ef'}
+ ],
+ 'memory_mb': 8192,
+ 'instance_type': 'm1.large',
+ 'vcpus': 4,
+ 'root_gb': 80,
+ 'image_ref': self.image['id'],
+ 'host': 'fake_host',
+ 'task_state':
+ 'scheduling',
+ 'reservation_id': 'r-3t8muvr0',
+ 'id': 1,
+ 'uuid': 'fake-uuid',
+ 'metadata': []}
+
+ def tearDown(self):
+ super(VMwareAPIVMTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_get_host_ip_addr(self):
+ self.assertEqual('test_url', self.conn.get_host_ip_addr())
+
+ def test_init_host_with_no_session(self):
+ self.conn._session = mock.Mock()
+ self.conn._session.vim = None
+ self.conn.init_host('fake_host')
+ self.conn._session._create_session.assert_called_once_with()
+
+ def test_init_host(self):
+ try:
+ self.conn.init_host("fake_host")
+ except Exception as ex:
+ self.fail("init_host raised: %s" % ex)
+
+ def _set_exception_vars(self):
+ self.wait_task = self.conn._session._wait_for_task
+ self.call_method = self.conn._session._call_method
+ self.task_ref = None
+ self.exception = False
+
+ def test_cleanup_host(self):
+ self.conn.init_host("fake_host")
+ try:
+ self.conn.cleanup_host("fake_host")
+ except Exception as ex:
+ self.fail("cleanup_host raised: %s" % ex)
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct(self, mock_init):
+ mock_init.return_value = None
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ vcdriver.cleanup_host("foo")
+ vcdriver._session.vim.client.service.Logout.assert_called_once_with(
+ vcdriver._session.vim.service_content.sessionManager
+ )
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct_with_bad_logout(self, mock_init):
+ mock_init.return_value = None
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ fault = suds.WebFault(mock.Mock(), mock.Mock())
+ vcdriver._session.vim.client.service.Logout.side_effect = fault
+ vcdriver.cleanup_host("foo")
+
+ def test_driver_capabilities(self):
+ self.assertTrue(self.conn.capabilities['has_imagecache'])
+ self.assertFalse(self.conn.capabilities['supports_recreate'])
+
+ def test_configuration_linked_clone(self):
+ self.flags(use_linked_clone=None, group='vmware')
+ self.assertRaises(vexc.UseLinkedCloneConfigurationFault,
+ self.conn._validate_configuration)
+
+ @mock.patch.object(pbm, 'get_profile_id_by_name')
+ def test_configuration_pbm(self, get_profile_mock):
+ get_profile_mock.return_value = 'fake-profile'
+ self.flags(pbm_enabled=True,
+ pbm_default_policy='fake-policy',
+ pbm_wsdl_location='fake-location', group='vmware')
+ self.conn._validate_configuration()
+
+ @mock.patch.object(pbm, 'get_profile_id_by_name')
+ def test_configuration_pbm_bad_default(self, get_profile_mock):
+ get_profile_mock.return_value = None
+ self.flags(pbm_enabled=True,
+ pbm_wsdl_location='fake-location',
+ pbm_default_policy='fake-policy', group='vmware')
+ self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
+ self.conn._validate_configuration)
+
+ def test_login_retries(self):
+ self.attempts = 0
+ self.login_session = vmwareapi_fake.FakeVim()._login()
+
+ def _fake_login(_self):
+ self.attempts += 1
+ if self.attempts == 1:
+ raise vexc.VimConnectionException('Here is my fake exception')
+ return self.login_session
+
+ def _fake_check_session(_self):
+ return True
+
+ self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
+ self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
+ _fake_check_session)
+
+ with mock.patch.object(greenthread, 'sleep'):
+ self.conn = driver.VMwareAPISession()
+ self.assertEqual(self.attempts, 2)
+
+ def _get_instance_type_by_name(self, type):
+ for instance_type in test_flavors.DEFAULT_FLAVORS:
+ if instance_type['name'] == type:
+ return instance_type
+ if type == 'm1.micro':
+ return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+ 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+ 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {},
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+ 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
+
+ def _create_instance(self, node=None, set_image_ref=True,
+ uuid=None, instance_type='m1.large'):
+ if not node:
+ node = self.node_name
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ self.type_data = self._get_instance_type_by_name(instance_type)
+ values = {'name': 'fake_name',
+ 'id': 1,
+ 'uuid': uuid,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'kernel_id': "fake_kernel_uuid",
+ 'ramdisk_id': "fake_ramdisk_uuid",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'flavor': instance_type,
+ 'node': node,
+ 'memory_mb': self.type_data['memory_mb'],
+ 'root_gb': self.type_data['root_gb'],
+ 'ephemeral_gb': self.type_data['ephemeral_gb'],
+ 'vcpus': self.type_data['vcpus'],
+ 'swap': self.type_data['swap'],
+ 'expected_attrs': ['system_metadata'],
+ }
+ if set_image_ref:
+ values['image_ref'] = self.fake_image_uuid
+ self.instance_node = node
+ self.uuid = uuid
+ self.instance = fake_instance.fake_instance_obj(
+ self.context, **values)
+
+ def _create_vm(self, node=None, num_instances=1, uuid=None,
+ instance_type='m1.large', powered_on=True):
+ """Create and spawn the VM."""
+ if not node:
+ node = self.node_name
+ self._create_instance(node=node, uuid=uuid,
+ instance_type=instance_type)
+ self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+ self.conn.spawn(self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=None)
+ self._check_vm_record(num_instances=num_instances,
+ powered_on=powered_on)
+ self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def _get_vm_record(self):
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ for vm in vms.objects:
+ if vm.get('name') == self.uuid:
+ return vm
+ self.fail('Unable to find VM backing!')
+
+ def _check_vm_record(self, num_instances=1, powered_on=True):
+ """Check if the spawned VM's properties correspond to the instance in
+ the db.
+ """
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), num_instances)
+
+ # Get Nova record for VM
+ vm_info = self.conn.get_info({'uuid': self.uuid,
+ 'name': 1,
+ 'node': self.instance_node})
+
+ vm = self._get_vm_record()
+
+ # Check that m1.large above turned into the right thing.
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ vcpus = self.type_data['vcpus']
+ self.assertEqual(vm_info['max_mem'], mem_kib)
+ self.assertEqual(vm_info['mem'], mem_kib)
+ self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
+ self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
+ self.assertEqual(vm.get("summary.config.memorySizeMB"),
+ self.type_data['memory_mb'])
+
+ self.assertEqual(
+ vm.get("config.hardware.device").VirtualDevice[2].obj_name,
+ "ns0:VirtualE1000")
+ if powered_on:
+ # Check that the VM is running according to Nova
+ self.assertEqual(power_state.RUNNING, vm_info['state'])
+
+ # Check that the VM is running according to vSphere API.
+ self.assertEqual('poweredOn', vm.get("runtime.powerState"))
+ else:
+ # Check that the VM is not running according to Nova
+ self.assertEqual(power_state.SHUTDOWN, vm_info['state'])
+
+ # Check that the VM is not running according to vSphere API.
+ self.assertEqual('poweredOff', vm.get("runtime.powerState"))
+
+ found_vm_uuid = False
+ found_iface_id = False
+ extras = vm.get("config.extraConfig")
+ for c in extras.OptionValue:
+ if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
+ found_vm_uuid = True
+ if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
+ found_iface_id = True
+
+ self.assertTrue(found_vm_uuid)
+ self.assertTrue(found_iface_id)
+
+ def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
+ """Check if the values returned by get_info correspond to the
+ instance object in the db.
+ """
+ mem_kib = long(self.type_data['memory_mb']) << 10
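+ # The left shift by 10 converts MB to KiB, e.g. m1.large's 8192 MB
+ # becomes 8388608 KiB, which is the unit get_info reports.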
+ self.assertEqual(info["state"], pwr_state)
+ self.assertEqual(info["max_mem"], mem_kib)
+ self.assertEqual(info["mem"], mem_kib)
+ self.assertEqual(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_instance_exists(self):
+ self._create_vm()
+ self.assertTrue(self.conn.instance_exists(self.instance))
+ invalid_instance = dict(uuid='foo', name='bar', node=self.node_name)
+ self.assertFalse(self.conn.instance_exists(invalid_instance))
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+
+ def test_list_instance_uuids(self):
+ self._create_vm()
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 1)
+
+ def test_list_instance_uuids_invalid_uuid(self):
+ self._create_vm(uuid='fake_id')
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 0)
+
+ def _cached_files_exist(self, exists=True):
+ cache = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ if exists:
+ self.assertTrue(vmwareapi_fake.get_file(str(cache)))
+ else:
+ self.assertFalse(vmwareapi_fake.get_file(str(cache)))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_instance_dir_disk_created(self, mock_from_image):
+ """Test that the image file is cached even when use_linked_clone
+ is False.
+ """
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ linked_clone=False)
+
+ mock_from_image.return_value = img_props
+ self._create_vm()
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+ self._cached_files_exist()
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_cache_dir_disk_created(self, mock_from_image):
+ """Test image disk is cached when use_linked_clone is True."""
+ self.flags(use_linked_clone=True, group='vmware')
+
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1 * units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE)
+
+ mock_from_image.return_value = img_props
+
+ self._create_vm()
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
+ def _iso_disk_type_created(self, instance_type='m1.large'):
+ self.image['disk_format'] = 'iso'
+ self._create_vm(instance_type=instance_type)
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
+ def test_iso_disk_type_created(self):
+ self._iso_disk_type_created()
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
+ def test_iso_disk_type_created_with_root_gb_0(self):
+ self._iso_disk_type_created(instance_type='m1.micro')
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(path)))
+
+ def test_iso_disk_cdrom_attach(self):
+ iso_path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid)
+
+ def fake_attach_cdrom(vm_ref, instance, data_store_ref,
+ iso_uploaded_path):
+ self.assertEqual(iso_uploaded_path, str(iso_path))
+
+ self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
+ fake_attach_cdrom)
+ self.image['disk_format'] = 'iso'
+ self._create_vm()
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_iso_disk_cdrom_attach_with_config_drive(self,
+ mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=80 * units.Gi,
+ file_type='iso',
+ linked_clone=False)
+
+ mock_from_image.return_value = img_props
+
+ self.flags(force_config_drive=True)
+ iso_path = [
+ ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid),
+ ds_util.DatastorePath(self.ds, 'fake-config-drive')]
+ self.iso_index = 0
+
+ def fake_create_config_drive(instance, injected_files, password,
+ data_store_name, folder, uuid, cookies):
+ return 'fake-config-drive'
+
+ def fake_attach_cdrom(vm_ref, instance, data_store_ref,
+ iso_uploaded_path):
+ self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
+ self.iso_index += 1
+
+ self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
+ fake_attach_cdrom)
+ self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ fake_create_config_drive)
+
+ self.image['disk_format'] = 'iso'
+ self._create_vm()
+ self.assertEqual(self.iso_index, 2)
+
+ def test_cdrom_attach_with_config_drive(self):
+ self.flags(force_config_drive=True)
+
+ iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive')
+ self.cd_attach_called = False
+
+ def fake_create_config_drive(instance, injected_files, password,
+ data_store_name, folder, uuid, cookies):
+ return 'fake-config-drive'
+
+ def fake_attach_cdrom(vm_ref, instance, data_store_ref,
+ iso_uploaded_path):
+ self.assertEqual(iso_uploaded_path, str(iso_path))
+ self.cd_attach_called = True
+
+ self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
+ fake_attach_cdrom)
+ self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ fake_create_config_drive)
+
+ self._create_vm()
+ self.assertTrue(self.cd_attach_called)
+
+ def test_spawn(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_spawn_vm_ref_cached(self):
+ uuid = uuidutils.generate_uuid()
+ self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
+ self._create_vm(uuid=uuid)
+ self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
+
+ def _spawn_power_state(self, power_on):
+ self._spawn = self.conn._vmops.spawn
+ self._power_on = power_on
+
+ def _fake_spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info=None,
+ instance_name=None, power_on=True):
+ return self._spawn(context, instance, image_meta,
+ injected_files, admin_password, network_info,
+ block_device_info=block_device_info,
+ instance_name=instance_name,
+ power_on=self._power_on)
+
+ with (
+ mock.patch.object(self.conn._vmops, 'spawn', _fake_spawn)
+ ):
+ self._create_vm(powered_on=power_on)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ if power_on:
+ self._check_vm_info(info, power_state.RUNNING)
+ else:
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_spawn_no_power_on(self):
+ self._spawn_power_state(False)
+
+ def test_spawn_power_on(self):
+ self._spawn_power_state(True)
+
+ def test_spawn_root_size_0(self):
+ self._create_vm(instance_type='m1.micro')
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ cache = ('[%s] vmware_base/%s/%s.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ self.assertTrue(vmwareapi_fake.get_file(cache))
+ self.assertFalse(vmwareapi_fake.get_file(gb_cache))
+
+ def _spawn_with_delete_exception(self, fault=None):
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "DeleteDatastoreFile_Task":
+ self.exception = True
+ task_mdo = vmwareapi_fake.create_task(method, "error",
+ error_fault=fault)
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ if fault:
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ else:
+ self.assertRaises(vexc.VMwareDriverException, self._create_vm)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_delete_exception_not_found(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
+
+ def test_spawn_with_delete_exception_file_fault(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
+
+ def test_spawn_with_delete_exception_cannot_delete_file(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
+
+ def test_spawn_with_delete_exception_file_locked(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
+
+ def test_spawn_with_delete_exception_general(self):
+ self._spawn_with_delete_exception()
+
+ def test_spawn_disk_extend(self):
+ self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
+ requested_size = 80 * units.Mi
+ self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
+ requested_size, mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_spawn_disk_extend_exists(self):
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
+
+ def _fake_extend(instance, requested_size, name, dc_ref):
+ vmwareapi_fake._add_file(str(root))
+
+ self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
+ _fake_extend)
+
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_sparse(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
+ ) as (mock_extend, mock_get_dc):
+ dc_val = mock.Mock()
+ dc_val.ref = "fake_dc_ref"
+ dc_val.name = "dc1"
+ mock_get_dc.return_value = dc_val
+ self._create_vm()
+ iid = img_props.image_id
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ iid, '%s.80.vmdk' % iid)
+ mock_extend.assert_called_once_with(
+ self.instance, self.instance.root_gb * units.Mi,
+ str(cached_image), "fake_dc_ref")
+
+ def test_spawn_disk_extend_failed_copy(self):
+ # Spawn instance
+ # copy for extend fails without creating a file
+ #
+ # Expect the copy error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+
+ CopyError = vexc.FileFaultException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake-copy-task':
+ raise CopyError('Copy failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == "CopyVirtualDisk_Task":
+ return 'fake-copy-task'
+
+ return self.call_method(module, method, *args, **kwargs)
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method),
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task)):
+ self.assertRaises(CopyError, self._create_vm)
+
+ def test_spawn_disk_extend_failed_partial_copy(self):
+ # Spawn instance
+ # Copy for extend fails, leaving a file behind
+ #
+ # Expect the file to be cleaned up
+ # Expect the copy error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+ self.task_ref = None
+ uuid = self.fake_image_uuid
+ cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+ uuid, uuid)
+
+ CopyError = vexc.FileFaultException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+ # N.B. We don't test for -flat here because real
+ # CopyVirtualDisk_Task doesn't actually create it
+ raise CopyError('Copy failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "CopyVirtualDisk_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method),
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task)):
+ self.assertRaises(CopyError, self._create_vm)
+ self.assertFalse(vmwareapi_fake.get_file(cached_image))
+
+ def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
+ # Spawn instance
+ # Copy for extend fails, leaves file behind
+ # File cleanup fails
+ #
+ # Expect file to be left behind
+ # Expect file cleanup error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+ self.task_ref = None
+ uuid = self.fake_image_uuid
+ cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+ uuid, uuid)
+
+ CopyError = vexc.FileFaultException
+ DeleteError = vexc.CannotDeleteFileException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+ # N.B. We don't test for -flat here because real
+ # CopyVirtualDisk_Task doesn't actually create it
+ raise CopyError('Copy failed!')
+ elif task_ref == 'fake-delete-task':
+ raise DeleteError('Delete failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == "DeleteDatastoreFile_Task":
+ return 'fake-delete-task'
+
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "CopyVirtualDisk_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method)):
+ self.assertRaises(DeleteError, self._create_vm)
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_invalid_disk_size(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=82 * units.Gi,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._create_vm)
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' %
+ self.fake_image_uuid)
+ tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80-flat.vmdk' %
+ self.fake_image_uuid)
+
+ NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ raise NoDiskSpace()
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == 'ExtendVirtualDisk_Task':
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (mock_wait_for_task, mock_call_method):
+ self.assertRaises(NoDiskSpace, self._create_vm)
+ self.assertFalse(vmwareapi_fake.get_file(str(cached_image)))
+ self.assertFalse(vmwareapi_fake.get_file(str(tmp_file)))
+
+ def test_spawn_with_move_file_exists_exception(self):
+ # The test validates that the spawn completes successfully.
+ # The "MoveDatastoreFile_Task" will raise a "file exists"
+ # exception. The flag self.exception is checked to verify
+ # that the exception has indeed been raised.
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.exception = True
+ raise vexc.FileAlreadyExistsException()
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_move_general_exception(self):
+ # The test will validate that the spawn completes
+ # successfully. The "MoveDatastoreFile_Task" will
+ # raise a general exception. The flag self.exception
+ # will be checked to see that the exception has
+ # indeed been raised.
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.exception = True
+ raise vexc.VMwareDriverException('Exception!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self.assertRaises(vexc.VMwareDriverException,
+ self._create_vm)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_move_poll_exception(self):
+ self.call_method = self.conn._session._call_method
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ task_mdo = vmwareapi_fake.create_task(method, "error")
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ self.assertRaises(vexc.VMwareDriverException,
+ self._create_vm)
+
+ def test_spawn_with_move_file_exists_poll_exception(self):
+ # The test will validate that the spawn completes
+ # successfully. The "MoveDatastoreFile_Task" will
+ # raise a file exists exception. The flag self.exception
+ # will be checked to see that the exception has
+ # indeed been raised.
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.exception = True
+ task_mdo = vmwareapi_fake.create_task(method, "error",
+ error_fault=vmwareapi_fake.FileAlreadyExists())
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(self.exception)
+
+ def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
+ self._create_instance(set_image_ref=set_image_ref)
+ self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+ self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ root_disk = [{'connection_info': connection_info}]
+ v_driver.block_device_info_get_mapping(
+ mox.IgnoreArg()).AndReturn(root_disk)
+ if vc_support:
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_res_pool_of_vm')
+ volumeops.VMwareVolumeOps._get_res_pool_of_vm(
+ mox.IgnoreArg()).AndReturn('fake_res_pool')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_relocate_vmdk_volume')
+ volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
+ 'fake_res_pool', mox.IgnoreArg())
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_volume')
+ volumeops.VMwareVolumeOps.attach_volume(connection_info,
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ block_device_info = {'mount_device': 'vda'}
+ self.conn.spawn(self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=block_device_info)
+
+ def test_spawn_attach_volume_iscsi(self):
+ self._create_instance()
+ self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+ self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ root_disk = [{'connection_info': connection_info}]
+ v_driver.block_device_info_get_mapping(
+ mox.IgnoreArg()).AndReturn(root_disk)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_volume')
+ volumeops.VMwareVolumeOps.attach_volume(connection_info,
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ block_device_info = {'mount_device': 'vda'}
+ self.conn.spawn(self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=block_device_info)
+
+ def mock_upload_image(self, context, image, instance, **kwargs):
+ self.assertEqual(image, 'Test-Snapshot')
+ self.assertEqual(instance, self.instance)
+ self.assertEqual(kwargs['disk_type'], 'preallocated')
+
+ def test_get_vm_ref_using_extra_config(self):
+ self._create_vm()
+ vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+ # Disrupt the fake Virtual Machine object so that extraConfig
+ # cannot be matched.
+ fake_vm = self._get_vm_record()
+ fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
+ # We should not get a Virtual Machine through extraConfig.
+ vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNone(vm_ref, 'VM Reference should be none')
+ # Check if we can find the Virtual Machine using the name.
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+ def test_search_vm_ref_by_identifier(self):
+ self._create_vm()
+ vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+ fake_vm = self._get_vm_record()
+ fake_vm.set("summary.config.instanceUuid", "foo")
+ fake_vm.set("name", "foo")
+ fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
+ self.assertIsNone(vm_util.search_vm_ref_by_identifier(
+ self.conn._session, self.instance['uuid']),
+ "VM Reference should be none")
+ self.assertIsNotNone(
+ vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
+ "VM Reference should not be none")
+
+ def test_get_object_for_optionvalue(self):
+ self._create_vm()
+ vms = self.conn._session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
+ vm_ref = vm_util._get_object_for_optionvalue(vms,
+ self.instance["uuid"])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+ def _test_snapshot(self):
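+        # Verifies the snapshot flow: the task_state callback should move
+        # from IMAGE_PENDING_UPLOAD to IMAGE_UPLOADING and the VM should
+        # remain running before and after the snapshot.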
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ with mock.patch.object(images, 'upload_image',
+ self.mock_upload_image):
+ self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
+ func_call_matcher.call)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertIsNone(func_call_matcher.match())
+
+ def test_snapshot(self):
+ self._create_vm()
+ self._test_snapshot()
+
+ def test_snapshot_no_root_disk(self):
+ self._iso_disk_type_created(instance_type='m1.micro')
+ self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
+
+ def test_snapshot_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
+
+ def test_snapshot_delete_vm_snapshot(self):
+ self._create_vm()
+ fake_vm = self._get_vm_record()
+ snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+ value="Snapshot-123",
+ name="VirtualMachineSnapshot")
+
+ self.mox.StubOutWithMock(vmops.VMwareVMOps,
+ '_create_vm_snapshot')
+ self.conn._vmops._create_vm_snapshot(
+ self.instance, fake_vm.obj).AndReturn(snapshot_ref)
+
+ self.mox.StubOutWithMock(vmops.VMwareVMOps,
+ '_delete_vm_snapshot')
+ self.conn._vmops._delete_vm_snapshot(
+ self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self._test_snapshot()
+
+ def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
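+        # TaskInProgress is retried (sleeping between attempts) up to the
+        # configured api_retry_count; any other exception propagates
+        # immediately without retries.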
+ self._create_vm()
+ fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+ snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+ value="Snapshot-123",
+ name="VirtualMachineSnapshot")
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=exception),
+ mock.patch.object(vmops, '_time_sleep_wrapper')
+ ) as (_fake_wait, _fake_sleep):
+ if exception != error_util.TaskInProgress:
+ self.assertRaises(exception,
+ self.conn._vmops._delete_vm_snapshot,
+ self.instance, fake_vm, snapshot_ref)
+ self.assertEqual(0, _fake_sleep.call_count)
+ else:
+ self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
+ snapshot_ref)
+ self.assertEqual(call_count - 1, _fake_sleep.call_count)
+ self.assertEqual(call_count, _fake_wait.call_count)
+
+ def test_snapshot_delete_vm_snapshot_exception(self):
+ self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
+
+ def test_snapshot_delete_vm_snapshot_exception_retry(self):
+ self.flags(api_retry_count=5, group='vmware')
+ self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress,
+ 5)
+
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ reboot_type = "SOFT"
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_with_uuid(self):
+ """Test fall back to use name when can't find by uuid."""
+ self._create_vm()
+ info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ reboot_type = "SOFT"
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
+ info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
+ self.context, self.instance, self.network_info,
+ 'SOFT')
+
+ def test_poll_rebooting_instances(self):
+ self.mox.StubOutWithMock(compute_api.API, 'reboot')
+ compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self._create_vm()
+ instances = [self.instance]
+ self.conn.poll_rebooting_instances(60, instances)
+
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
+ self.context, self.instance, self.network_info,
+ 'SOFT')
+
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+
+ def test_suspend_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
+ self.instance)
+
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.conn.resume(self.context, self.instance, self.network_info)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_resume_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.resume,
+ self.context, self.instance, self.network_info)
+
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
+ self.context, self.instance, self.network_info)
+
+ def test_power_on(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.conn.power_on(self.context, self.instance, self.network_info)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_power_on_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
+ self.context, self.instance, self.network_info)
+
+ def test_power_off(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_power_off_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
+ self.instance)
+
+ def test_resume_state_on_host_boot(self):
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, "reboot")
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("poweredOff")
+ self.conn.reboot(self.context, self.instance, 'network_info',
+ 'hard', None)
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def test_resume_state_on_host_boot_no_reboot_1(self):
+ """Don't call reboot on instance which is poweredon."""
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, 'reboot')
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("poweredOn")
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def test_resume_state_on_host_boot_no_reboot_2(self):
+ """Don't call reboot on instance which is suspended."""
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, 'reboot')
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("suspended")
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def destroy_rescued(self, fake_method):
+ self._rescue()
+ with contextlib.nested(
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ fake_method),
+ mock.patch.object(vm_util, "power_on_instance"),
+ ) as (fake_detach, fake_power_on):
+ self.instance['vm_state'] = vm_states.RESCUED
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ inst_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(inst_path)))
+ rescue_file_path = ds_util.DatastorePath(
+ self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path)))
+ # Unrescue does not power on with destroy
+ self.assertFalse(fake_power_on.called)
+
+ def test_destroy_rescued(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ pass
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy_rescued_with_exception(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ raise exception.NovaException('Here is my fake exception')
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+ self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def test_destroy_no_datastore(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+ # Delete the vmPathName
+ vm = self._get_vm_record()
+ vm.delete('config.files.vmPathName')
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+
+ def test_destroy_non_existent(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self._create_instance()
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, self.destroy_disks)
+ mock_destroy.assert_called_once_with(self.instance,
+ self.destroy_disks)
+
+ def test_destroy_instance_without_compute(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self.conn.destroy(self.context, self.instance_without_compute,
+ self.network_info,
+ None, self.destroy_disks)
+ self.assertFalse(mock_destroy.called)
+
+ def _destroy_instance_without_vm_ref(self, resize_exists=False,
+ task_state=None):
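+        # With no VM reference, destroy should still call _destroy_instance
+        # once for the instance; if a resized copy exists it is destroyed
+        # as well, except while the resize is being reverted.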
+
+ def fake_vm_ref_from_name(session, vm_name):
+ if resize_exists:
+ return 'fake-ref'
+
+ self._create_instance()
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_vm_ref_from_name',
+ fake_vm_ref_from_name),
+ mock.patch.object(self.conn._session,
+ '_call_method'),
+ mock.patch.object(self.conn._vmops,
+ '_destroy_instance')
+ ) as (mock_get, mock_call, mock_destroy):
+ self.instance.task_state = task_state
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, True)
+ if resize_exists:
+ if task_state == task_states.RESIZE_REVERTING:
+ expected = 1
+ else:
+ expected = 2
+ else:
+ expected = 1
+ self.assertEqual(expected, mock_destroy.call_count)
+ self.assertFalse(mock_call.called)
+
+ def test_destroy_instance_without_vm_ref(self):
+ self._destroy_instance_without_vm_ref()
+
+ def test_destroy_instance_without_vm_ref_with_resize(self):
+ self._destroy_instance_without_vm_ref(resize_exists=True)
+
+ def test_destroy_instance_without_vm_ref_with_resize_revert(self):
+ self._destroy_instance_without_vm_ref(resize_exists=True,
+ task_state=task_states.RESIZE_REVERTING)
+
+ def _rescue(self, config_drive=False):
+        # Validate that power on is called exactly once during rescue.
+ self._power_on = vm_util.power_on_instance
+ self._power_on_called = 0
+
+ def fake_attach_disk_to_vm(vm_ref, instance,
+ adapter_type, disk_type, vmdk_path=None,
+ disk_size=None, linked_clone=False,
+ controller_key=None, unit_number=None,
+ device_name=None):
+ info = self.conn.get_info(instance)
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ if config_drive:
+ def fake_create_config_drive(instance, injected_files, password,
+ data_store_name, folder,
+ instance_uuid, cookies):
+ self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+ return str(ds_util.DatastorePath(data_store_name,
+ instance_uuid, 'fake.iso'))
+
+ self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ fake_create_config_drive)
+
+ self._create_vm()
+
+ def fake_power_on_instance(session, instance, vm_ref=None):
+ self._power_on_called += 1
+ return self._power_on(session, instance, vm_ref=vm_ref)
+
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.stubs.Set(vm_util, "power_on_instance",
+ fake_power_on_instance)
+ self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
+ fake_attach_disk_to_vm)
+
+ self.conn.rescue(self.context, self.instance, self.network_info,
+ self.image, 'fake-password')
+
+ info = self.conn.get_info({'name': '1-rescue',
+ 'uuid': '%s-rescue' % self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid))
+ self.assertEqual(1, self._power_on_called)
+
+ def test_rescue(self):
+ self._rescue()
+ inst_file_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path)))
+ rescue_file_path = ds_util.DatastorePath(self.ds,
+ '%s-rescue' % self.uuid,
+ '%s-rescue.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path)))
+
+ def test_rescue_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ self._rescue(config_drive=True)
+
+ def test_unrescue(self):
+        # NOTE(dims): the driver's unrescue eventually ends up in
+        # vmops.unrescue with power_on=True; test_destroy_rescued covers
+        # vmops.unrescue with power_on=False.
+ self._rescue()
+ vm_ref = vm_util.get_vm_ref(self.conn._session,
+ self.instance)
+ vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session,
+ '%s-rescue' % self.uuid)
+
+ self.poweroff_instance = vm_util.power_off_instance
+
+ def fake_power_off_instance(session, instance, vm_ref):
+            # Called so that we actually power off the simulated VM;
+            # destroy validates that the instance is not powered on.
+ self.poweroff_instance(session, instance, vm_ref)
+
+ def fake_detach_disk_from_vm(vm_ref, instance,
+ device_name, destroy_disk=False):
+ self.test_device_name = device_name
+ info = self.conn.get_info(instance)
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, "power_off_instance",
+ side_effect=fake_power_off_instance),
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ side_effect=fake_detach_disk_from_vm),
+ mock.patch.object(vm_util, "power_on_instance"),
+ ) as (poweroff, detach, fake_power_on):
+ self.conn.unrescue(self.instance, None)
+ poweroff.assert_called_once_with(self.conn._session, mock.ANY,
+ vm_rescue_ref)
+ detach.assert_called_once_with(vm_rescue_ref, mock.ANY,
+ self.test_device_name)
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance,
+ vm_ref=vm_ref)
+ self.test_vm_ref = None
+ self.test_device_name = None
+
+ def test_get_diagnostics(self):
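+        # The fake VM exposes a fixed set of summary/runtime properties;
+        # get_diagnostics is expected to return them under 'vmware:' keys.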
+ self._create_vm()
+ expected = {'memoryReservation': 0, 'suspendInterval': 0,
+ 'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
+ 'consumedOverheadMemory': 20, 'numEthernetCards': 1,
+ 'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
+ 'memoryOverhead': 21417984,
+ 'guestMemoryUsage': 0, 'connectionState': 'connected',
+ 'memorySizeMB': 512, 'balloonedMemory': 0,
+ 'vmPathName': 'fake_path', 'template': False,
+ 'overallCpuUsage': 0, 'powerState': 'poweredOn',
+ 'cpuReservation': 0, 'overallCpuDemand': 0,
+ 'numVirtualDisks': 1, 'hostMemoryUsage': 141}
+ expected = dict([('vmware:' + k, v) for k, v in expected.items()])
+ self.assertThat(
+ self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node}),
+ matchers.DictMatches(expected))
+
+ def test_get_instance_diagnostics(self):
+ self._create_vm()
+ expected = {'uptime': 0,
+ 'memory_details': {'used': 0, 'maximum': 512},
+ 'nic_details': [],
+ 'driver': 'vmwareapi',
+ 'state': 'running',
+ 'version': '1.0',
+ 'cpu_details': [],
+ 'disk_details': [],
+ 'hypervisor_os': 'esxi',
+ 'config_drive': False}
+ actual = self.conn.get_instance_diagnostics(
+ {'name': 1, 'uuid': self.uuid, 'node': self.instance_node})
+ self.assertThat(actual.serialize(), matchers.DictMatches(expected))
+
+ def test_get_console_output(self):
+ self.assertRaises(NotImplementedError, self.conn.get_console_output,
+ None, None)
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
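+        # On this test case finish_migration is only expected to complete
+        # without raising; the VC-specific checks live in the subclass.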
+ self._create_vm()
+ self.conn.finish_migration(context=self.context,
+ migration=None,
+ instance=self.instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+
+ def _test_finish_revert_migration(self, power_on):
+ self._create_vm()
+        # Ensure the ESX driver raises an error
+ self.assertRaises(NotImplementedError,
+ self.conn.finish_revert_migration,
+ self.context,
+ instance=self.instance,
+ network_info=None)
+
+ def test_get_vnc_console_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound,
+ self.conn.get_vnc_console,
+ self.context,
+ self.instance)
+
+ def _test_get_vnc_console(self):
+ self._create_vm()
+ fake_vm = self._get_vm_record()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ opt_val = OptionValue(key='', value=5906)
+ fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ vnc_console = self.conn.get_vnc_console(self.context, self.instance)
+ self.assertEqual(self.vnc_host, vnc_console.host)
+ self.assertEqual(5906, vnc_console.port)
+
+ def test_get_vnc_console(self):
+ self._test_get_vnc_console()
+
+ def test_get_vnc_console_noport(self):
+ self._create_vm()
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.conn.get_vnc_console,
+ self.context,
+ self.instance)
+
+ def test_get_volume_connector(self):
+ self._create_vm()
+ connector_dict = self.conn.get_volume_connector(self.instance)
+ fake_vm = self._get_vm_record()
+ fake_vm_id = fake_vm.obj.value
+ self.assertEqual(connector_dict['ip'], 'test_url')
+ self.assertEqual(connector_dict['initiator'], 'iscsi-name')
+ self.assertEqual(connector_dict['host'], 'test_url')
+ self.assertEqual(connector_dict['instance'], fake_vm_id)
+
+ def _test_vmdk_connection_info(self, type):
+ return {'driver_volume_type': type,
+ 'serial': 'volume-fake-id',
+ 'data': {'volume': 'vm-10',
+ 'volume_id': 'volume-fake-id'}}
+
+ def test_volume_attach_vmdk(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_attach_volume_vmdk')
+ volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_volume_detach_vmdk(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_detach_volume_vmdk')
+ volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_attach_vmdk_disk_to_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+
+ # create fake backing info
+ volume_device = vmwareapi_fake.DataObject()
+ volume_device.backing = vmwareapi_fake.DataObject()
+ volume_device.backing.fileName = 'fake_path'
+
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_vmdk_base_volume_device')
+ volumeops.VMwareVolumeOps._get_vmdk_base_volume_device(
+ mox.IgnoreArg()).AndReturn(volume_device)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_disk_to_vm')
+ volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(), mox.IgnoreArg(),
+ vmdk_path='fake_path')
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_detach_vmdk_disk_from_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_volume_uuid')
+ volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(),
+ 'volume-fake-id').AndReturn('fake_disk_uuid')
+ self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device')
+ vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(),
+ 'fake_disk_uuid').AndReturn('fake_device')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_consolidate_vmdk_volume')
+ volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance,
+ mox.IgnoreArg(), 'fake_device', mox.IgnoreArg())
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'detach_disk_from_vm')
+ volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_volume_attach_iscsi(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_attach_volume_iscsi')
+ volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_volume_detach_iscsi(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_detach_volume_iscsi')
+ volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_attach_iscsi_disk_to_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ connection_info['data']['target_portal'] = 'fake_target_host:port'
+ connection_info['data']['target_iqn'] = 'fake_target_iqn'
+ mount_point = '/dev/vdc'
+ discover = ('fake_name', 'fake_uuid')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_get_target')
+ # simulate target not found
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn((None, None))
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_add_send_target_host')
+ # rescan gets called with target portal
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_rescan_hba')
+ volumeops.VMwareVolumeOps._iscsi_rescan_hba(
+ connection_info['data']['target_portal'])
+ # simulate target found
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn(discover)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_disk_to_vm')
+ volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(), 'rdmp',
+ device_name=mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_iscsi_rescan_hba(self):
+ fake_target_portal = 'fake_target_host:port'
+ host_storage_sys = vmwareapi_fake._get_objects(
+ "HostStorageSystem").objects[0]
+ iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
+ '.hostBusAdapter')
+ iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
+ # Check the host system does not have the send target
+ self.assertRaises(AttributeError, getattr, iscsi_hba,
+ 'configuredSendTarget')
+ # Rescan HBA with the target portal
+ vops = volumeops.VMwareVolumeOps(self.conn._session)
+ vops._iscsi_rescan_hba(fake_target_portal)
+ # Check if HBA has the target portal configured
+ self.assertEqual('fake_target_host',
+ iscsi_hba.configuredSendTarget[0].address)
+ # Rescan HBA with same portal
+ vops._iscsi_rescan_hba(fake_target_portal)
+ self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
+
+ def test_iscsi_get_target(self):
+ data = {'target_portal': 'fake_target_host:port',
+ 'target_iqn': 'fake_target_iqn'}
+ host = vmwareapi_fake._get_objects('HostSystem').objects[0]
+ host._add_iscsi_target(data)
+ vops = volumeops.VMwareVolumeOps(self.conn._session)
+ result = vops._iscsi_get_target(data)
+ self.assertEqual(('fake-device', 'fake-uuid'), result)
+
+ def test_detach_iscsi_disk_from_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ connection_info['data']['target_portal'] = 'fake_target_portal'
+ connection_info['data']['target_iqn'] = 'fake_target_iqn'
+ mount_point = '/dev/vdc'
+ find = ('fake_name', 'fake_uuid')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_get_target')
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn(find)
+ self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
+ device = 'fake_device'
+ vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'detach_disk_from_vm')
+ volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
+ self.instance, device, destroy_disk=True)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_connection_info_get(self):
+ self._create_vm()
+ connector = self.conn.get_volume_connector(self.instance)
+ self.assertEqual(connector['ip'], 'test_url')
+ self.assertEqual(connector['host'], 'test_url')
+ self.assertEqual(connector['initiator'], 'iscsi-name')
+ self.assertIn('instance', connector)
+
+ def test_connection_info_get_after_destroy(self):
+ self._create_vm()
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ connector = self.conn.get_volume_connector(self.instance)
+ self.assertEqual(connector['ip'], 'test_url')
+ self.assertEqual(connector['host'], 'test_url')
+ self.assertEqual(connector['initiator'], 'iscsi-name')
+ self.assertNotIn('instance', connector)
+
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.refresh_instance_security_rules,
+ instance=None)
+
+ def test_image_aging_image_used(self):
+ self._create_vm()
+ all_instances = [self.instance]
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist()
+
+ def _get_timestamp_filename(self):
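+        # Builds the image-cache timestamp file name from the overridden
+        # 'old' time used by the aging tests.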
+ return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
+ timeutils.strtime(at=self.old_time,
+ fmt=imagecache.TIMESTAMP_FORMAT))
+
+ def _override_time(self):
+ self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)
+
+ def _fake_get_timestamp_filename(fake):
+ return self._get_timestamp_filename()
+
+ self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
+ _fake_get_timestamp_filename)
+
+ def _timestamp_file_exists(self, exists=True):
+ timestamp = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ self._get_timestamp_filename() + '/')
+ if exists:
+ self.assertTrue(vmwareapi_fake.get_file(str(timestamp)))
+ else:
+ self.assertFalse(vmwareapi_fake.get_file(str(timestamp)))
+
+ def _image_aging_image_marked_for_deletion(self):
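+        # With no running instances the cached image files are kept but a
+        # timestamp file is created to mark them for later deletion.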
+ self._create_vm(uuid=uuidutils.generate_uuid())
+ self._cached_files_exist()
+ all_instances = []
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist()
+ self._timestamp_file_exists()
+
+ def test_image_aging_image_marked_for_deletion(self):
+ self._override_time()
+ self._image_aging_image_marked_for_deletion()
+
+ def _timestamp_file_removed(self):
+ self._override_time()
+ self._image_aging_image_marked_for_deletion()
+ self._create_vm(num_instances=2,
+ uuid=uuidutils.generate_uuid())
+ self._timestamp_file_exists(exists=False)
+
+ def test_timestamp_file_removed_spawn(self):
+ self._timestamp_file_removed()
+
+ def test_timestamp_file_removed_aging(self):
+ self._timestamp_file_removed()
+ ts = self._get_timestamp_filename()
+ ts_path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid, ts + '/')
+ vmwareapi_fake._add_file(str(ts_path))
+ self._timestamp_file_exists()
+ all_instances = [self.instance]
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._timestamp_file_exists(exists=False)
+
+ def test_image_aging_disabled(self):
+ self._override_time()
+ self.flags(remove_unused_base_images=False)
+ self._create_vm()
+ self._cached_files_exist()
+ all_instances = []
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist(exists=True)
+ self._timestamp_file_exists(exists=False)
+
+ def _image_aging_aged(self, aging_time=100):
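+        # The cached files are only purged once more than 'aging_time'
+        # seconds separate the timestamp (12:00:00) from the overridden
+        # current time (12:00:10).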
+ self._override_time()
+ cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ self.flags(remove_unused_original_minimum_age_seconds=aging_time)
+ self._image_aging_image_marked_for_deletion()
+ all_instances = []
+ timeutils.set_time_override(cur_time)
+ self.conn.manage_image_cache(self.context, all_instances)
+
+ def test_image_aging_aged(self):
+ self._image_aging_aged(aging_time=8)
+ self._cached_files_exist(exists=False)
+
+ def test_image_aging_not_aged(self):
+ self._image_aging_aged()
+ self._cached_files_exist()
+
+
+class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
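+    # Re-runs the whole VMwareAPIVMTestCase suite against the VC driver,
+    # configured with two clusters, and adds VC-only tests below.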
+
+ @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
+ def setUp(self, mock_register):
+ super(VMwareAPIVCDriverTestCase, self).setUp(create_connection=False)
+ cluster_name = 'test_cluster'
+ cluster_name2 = 'test_cluster2'
+ self.flags(cluster_name=[cluster_name, cluster_name2],
+ api_retry_count=1,
+ task_poll_interval=10, datastore_regex='.*', group='vmware')
+ self.flags(vnc_enabled=False,
+ image_cache_subdirectory_name='vmware_base')
+ vmwareapi_fake.reset()
+ self.conn = driver.VMwareVCDriver(None, False)
+ self._set_exception_vars()
+ self.node_name = self.conn._resources.keys()[0]
+ self.node_name2 = self.conn._resources.keys()[1]
+ if cluster_name2 in self.node_name2:
+ self.ds = 'ds1'
+ else:
+ self.ds = 'ds2'
+ self.vnc_host = 'ha-host'
+
+ def tearDown(self):
+ super(VMwareAPIVCDriverTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)
+
+ def test_register_extension(self):
+ with mock.patch.object(self.conn._session, '_call_method',
+ return_value=None) as mock_call_method:
+ self.conn._register_openstack_extension()
+ mock_call_method.assert_has_calls(
+ [mock.call(oslo_vim_util, 'find_extension',
+ constants.EXTENSION_KEY),
+ mock.call(oslo_vim_util, 'register_extension',
+ constants.EXTENSION_KEY,
+ constants.EXTENSION_TYPE_INSTANCE)])
+
+ def test_register_extension_already_exists(self):
+ with mock.patch.object(self.conn._session, '_call_method',
+ return_value='fake-extension') as mock_find_ext:
+ self.conn._register_openstack_extension()
+ mock_find_ext.assert_called_once_with(oslo_vim_util,
+ 'find_extension',
+ constants.EXTENSION_KEY)
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+
+ def test_list_instances_from_nodes(self):
+ # Create instance on node1
+ self._create_vm(self.node_name)
+ # Create instances on the other node
+ self._create_vm(self.node_name2, num_instances=2)
+ self._create_vm(self.node_name2, num_instances=3)
+ node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name)
+ node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2)
+ self.assertEqual(1, len(node1_vmops.list_instances()))
+ self.assertEqual(2, len(node2_vmops.list_instances()))
+ self.assertEqual(3, len(self.conn.list_instances()))
+
+ def _setup_mocks_for_session(self, mock_init):
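+        # Builds a VC driver whose session starts without a vim connection
+        # and only acquires one when _create_session is invoked.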
+ mock_init.return_value = None
+
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ vcdriver._session.vim = None
+
+ def side_effect():
+ vcdriver._session.vim = mock.Mock()
+ vcdriver._session._create_session.side_effect = side_effect
+ return vcdriver
+
+ def test_host_power_action(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_power_action, 'host', 'action')
+
+ def test_host_maintenance_mode(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_maintenance_mode, 'host', 'mode')
+
+ def test_set_host_enabled(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.set_host_enabled, 'host', 'state')
+
+ def test_datastore_regex_configured(self):
+ for node in self.conn._resources.keys():
+ self.assertEqual(self.conn._datastore_regex,
+ self.conn._resources[node]['vmops']._datastore_regex)
+
+ def test_get_available_resource(self):
+ stats = self.conn.get_available_resource(self.node_name)
+ cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
+ "vendor": ["Intel", "Intel"],
+ "topology": {"cores": 16,
+ "threads": 32}}
+ self.assertEqual(stats['vcpus'], 32)
+ self.assertEqual(stats['local_gb'], 1024)
+ self.assertEqual(stats['local_gb_used'], 1024 - 500)
+ self.assertEqual(stats['memory_mb'], 1000)
+ self.assertEqual(stats['memory_mb_used'], 500)
+ self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server')
+ self.assertEqual(stats['hypervisor_version'], 5001000)
+ self.assertEqual(stats['hypervisor_hostname'], self.node_name)
+ self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info))
+ self.assertEqual(stats['supported_instances'],
+ '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')
+
+ def test_invalid_datastore_regex(self):
+
+        # Tests that an exception is raised for an invalid regular
+        # expression in vmware_datastore_regex.
+ self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
+ group='vmware')
+ self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
+
+ def test_get_available_nodes(self):
+ nodelist = self.conn.get_available_nodes()
+ self.assertEqual(len(nodelist), 2)
+ self.assertIn(self.node_name, nodelist)
+ self.assertIn(self.node_name2, nodelist)
+
+ def test_spawn_multiple_node(self):
+
+ def fake_is_neutron():
+ return False
+
+ self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron)
+ uuid1 = uuidutils.generate_uuid()
+ uuid2 = uuidutils.generate_uuid()
+ self._create_vm(node=self.node_name, num_instances=1,
+ uuid=uuid1)
+ info = self.conn.get_info({'uuid': uuid1,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ self._create_vm(node=self.node_name2, num_instances=1,
+ uuid=uuid2)
+ info = self.conn.get_info({'uuid': uuid2,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot(self):
+ self._create_vm()
+ self._test_snapshot()
+
+ def test_snapshot_using_file_manager(self):
+ self._create_vm()
+ uuid_str = uuidutils.generate_uuid()
+ self.mox.StubOutWithMock(uuidutils,
+ 'generate_uuid')
+ uuidutils.generate_uuid().AndReturn(uuid_str)
+
+ self.mox.StubOutWithMock(ds_util, 'file_delete')
+ disk_ds_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s.vmdk" % uuid_str)
+ disk_ds_flat_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str)
+ # Check calls for delete vmdk and -flat.vmdk pair
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_flat_path,
+ mox.IgnoreArg()).AndReturn(None)
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None)
+
+ self.mox.ReplayAll()
+ self._test_snapshot()
+
+ def test_spawn_invalid_node(self):
+ self._create_instance(node='InvalidNodeName')
+ self.assertRaises(exception.NotFound, self.conn.spawn,
+ self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=None)
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_with_sparse_image(self, mock_from_image):
+ img_info = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=False)
+
+ mock_from_image.return_value = img_info
+
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance()
+ self.assertRaises(NotImplementedError,
+ self.conn.plug_vifs,
+ instance=self.instance, network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance()
+ self.assertRaises(NotImplementedError,
+ self.conn.unplug_vifs,
+ instance=self.instance, network_info=None)
+
+ def _create_vif(self):
+ gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
+ dns_4 = network_model.IP(address='8.8.8.8', type=None)
+ subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
+ dns=[dns_4],
+ gateway=gw_4,
+ routes=None,
+ dhcp_server='191.168.1.1')
+
+ gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
+ subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
+ dns=None,
+ gateway=gw_6,
+ ips=None,
+ routes=None)
+
+ network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge=None,
+ label=None,
+ subnets=[subnet_4,
+ subnet_6],
+ bridge_interface='eth0',
+ vlan=99)
+
+ vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_neutron,
+ type=None,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+ return vif_bridge_neutron
+
+ def _validate_interfaces(self, id, index, num_iface_ids):
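+        # Checks that extraConfig holds an 'nvp.iface-id.<index>' entry with
+        # the given id and that the total number of iface-id entries matches
+        # num_iface_ids.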
+ vm = self._get_vm_record()
+ found_iface_id = False
+ extras = vm.get("config.extraConfig")
+ key = "nvp.iface-id.%s" % index
+ num_found = 0
+ for c in extras.OptionValue:
+ if c.key.startswith("nvp.iface-id."):
+ num_found += 1
+ if c.key == key and c.value == id:
+ found_iface_id = True
+ self.assertTrue(found_iface_id)
+ self.assertEqual(num_found, num_iface_ids)
+
+ def _attach_interface(self, vif):
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_attach_interface(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ def test_attach_interface_with_exception(self):
+ self._create_vm()
+ vif = self._create_vif()
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceAttachFailed,
+ self.conn.attach_interface,
+ self.instance, self.image, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def _detach_interface(self, vif, mock_get_device):
+ self._create_vm()
+ self._attach_interface(vif)
+ self.conn.detach_interface(self.instance, vif)
+ self._validate_interfaces('free', 1, 2)
+
+ def test_detach_interface(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+
+ def test_detach_interface_and_attach(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_detach_interface_no_device(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ def test_detach_interface_no_vif_match(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ vif['id'] = 'bad-id'
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def test_detach_interface_with_exception(self, mock_get_device):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceDetachFailed,
+ self.conn.detach_interface,
+ self.instance, vif)
+
+ def test_migrate_disk_and_power_off(self):
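+        # After the migration the instance should resolve to a different
+        # VM reference than the one it had before.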
+ def fake_update_instance_progress(context, instance, step,
+ total_steps):
+ pass
+
+ def fake_get_host_ref_from_name(dest):
+ return None
+
+ self._create_vm(instance_type='m1.large')
+ vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
+ flavor = self._get_instance_type_by_name('m1.large')
+ self.stubs.Set(self.conn._vmops, "_update_instance_progress",
+ fake_update_instance_progress)
+ self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
+ fake_get_host_ref_from_name)
+ self.conn.migrate_disk_and_power_off(self.context, self.instance,
+ 'fake_dest', flavor,
+ None)
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertNotEqual(vm_ref_orig.value, vm_ref.value,
+ "These should be different")
+
+ def test_disassociate_vmref_from_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ self.assertRaises(exception.InstanceNotFound,
+ vm_util.get_vm_ref, self.conn._session, self.instance)
+
+ def test_clone_vmref_for_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0]
+ ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0]
+ dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0]
+ vm_util.clone_vmref_for_instance(self.conn._session, self.instance,
+ vm_ref, host_ref, ds_ref,
+ dc_obj.get("vmFolder"))
+ self.assertIsNotNone(
+ vm_util.get_vm_ref(self.conn._session, self.instance),
+ "No VM found")
+ cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertNotEqual(vm_ref.value, cloned_vm_ref.value,
+ "Reference for the cloned VM should be different")
+ vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref)
+ cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref)
+ self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup",
+ "Original VM name should be with suffix -backup")
+ self.assertEqual(cloned_vm_obj.name, self.instance['uuid'],
+ "VM name does not match instance['uuid']")
+ self.assertRaises(vexc.MissingParameter,
+ vm_util.clone_vmref_for_instance, self.conn._session,
+ self.instance, None, host_ref, ds_ref,
+ dc_obj.get("vmFolder"))
+
+ def test_associate_vmref_for_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ # First disassociate the VM from the instance so that we have a VM
+ # to later associate using the associate_vmref_for_instance method
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ # Ensure that the VM is indeed disassociated and that we cannot find
+ # the VM using the get_vm_ref method
+ self.assertRaises(exception.InstanceNotFound,
+ vm_util.get_vm_ref, self.conn._session, self.instance)
+ # Associate the VM back to the instance
+ vm_util.associate_vmref_for_instance(self.conn._session, self.instance,
+ suffix="-backup")
+ # Verify if we can get the VM reference
+ self.assertIsNotNone(
+ vm_util.get_vm_ref(self.conn._session, self.instance),
+ "No VM found")
+
+ def test_confirm_migration(self):
+ self._create_vm()
+ self.conn.confirm_migration(self.context, self.instance, None)
+
+ def test_resize_to_smaller_disk(self):
+ self._create_vm(instance_type='m1.large')
+ flavor = self._get_instance_type_by_name('m1.small')
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.conn.migrate_disk_and_power_off, self.context,
+ self.instance, 'fake_dest', flavor, None)
+
+ def test_spawn_attach_volume_vmdk(self):
+ self._spawn_attach_volume_vmdk(vc_support=True)
+
+ def test_spawn_attach_volume_vmdk_no_image_ref(self):
+ self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)
+
+ def test_pause(self):
+ # Tests that the VMwareVCDriver does not implement the pause method.
+ self._create_instance()
+ self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
+
+ def test_unpause(self):
+ # Tests that the VMwareVCDriver does not implement the unpause method.
+ self._create_instance()
+ self.assertRaises(NotImplementedError, self.conn.unpause,
+ self.instance)
+
+ def test_datastore_dc_map(self):
+ vmops = self.conn._resources[self.node_name]['vmops']
+ self.assertEqual({}, vmops._datastore_dc_mapping)
+ self._create_vm()
+        # Currently there are two datastores.
+ self.assertEqual(2, len(vmops._datastore_dc_mapping))
+
+ def test_rollback_live_migration_at_destination(self):
+ with mock.patch.object(self.conn, "destroy") as mock_destroy:
+ self.conn.rollback_live_migration_at_destination(self.context,
+ "instance", [], None)
+ mock_destroy.assert_called_once_with(self.context,
+ "instance", [], None)
+
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self.conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(1, len(instances))
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+ self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def test_destroy_no_datastore(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(1, len(instances))
+ # Overwrite the vmPathName
+ vm = self._get_vm_record()
+ vm.set("config.files.vmPathName", None)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+
+ def test_destroy_non_existent(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self._create_instance()
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, self.destroy_disks)
+ mock_destroy.assert_called_once_with(self.instance,
+ self.destroy_disks)
+
+ def test_destroy_instance_without_compute(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self.conn.destroy(self.context, self.instance_without_compute,
+ self.network_info,
+ None, self.destroy_disks)
+ self.assertFalse(mock_destroy.called)
+
+ def test_get_host_uptime(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.get_host_uptime, 'host')
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ """Tests the finish_migration method on VC Driver."""
+        # Set up the test instance in the database.
+ self._create_vm()
+ if resize_instance:
+ self.instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(self.conn._vmops,
+ "_update_instance_progress"),
+ mock.patch.object(self.conn._session, "_wait_for_task"),
+ mock.patch.object(vm_util, "get_vm_resize_spec",
+ return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self.conn._vmops,
+ 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(vm_util, "power_on_instance")
+ ) as (fake_call_method, fake_update_instance_progress,
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
+ self.conn.finish_migration(context=self.context,
+ migration=None,
+ instance=self.instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+ if resize_instance:
+ fake_vm_resize_spec.assert_called_once_with(
+ self.conn._session.vim.client.factory,
+ self.instance)
+ fake_call_method.assert_any_call(
+ self.conn._session.vim,
+ "ReconfigVM_Task",
+ vm_ref,
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self.instance, self.instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
+ else:
+ self.assertFalse(fake_vm_resize_spec.called)
+ self.assertFalse(fake_call_method.called)
+ self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
+
+ if power_on:
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance,
+ vm_ref=vm_ref)
+ else:
+ self.assertFalse(fake_power_on.called)
+ fake_update_instance_progress.called_once_with(
+ self.context, self.instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(power_on=True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(power_on=False)
+
+ def test_finish_migration_power_on_resize(self):
+ self._test_finish_migration(power_on=True,
+ resize_instance=True)
+
+ @mock.patch.object(vm_util, 'associate_vmref_for_instance')
+ @mock.patch.object(vm_util, 'power_on_instance')
+ def _test_finish_revert_migration(self, fake_power_on,
+ fake_associate_vmref, power_on):
+ """Tests the finish_revert_migration method on VC Driver."""
+
+        # Set up the test instance in the database.
+ self._create_instance()
+ self.conn.finish_revert_migration(self.context,
+ instance=self.instance,
+ network_info=None,
+ block_device_info=None,
+ power_on=power_on)
+ fake_associate_vmref.assert_called_once_with(self.conn._session,
+ self.instance,
+ suffix='-orig')
+ if power_on:
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance)
+ else:
+ self.assertFalse(fake_power_on.called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(power_on=True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(power_on=False)
+
+ def test_pbm_wsdl_location(self):
+ self.flags(pbm_enabled=True,
+ pbm_wsdl_location='fira',
+ group='vmware')
+ self.conn._update_pbm_location()
+ self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
+ self.assertIsNone(self.conn._session._pbm)
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
new file mode 100644
index 0000000000..6f5cf74b26
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -0,0 +1,548 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import re
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from testtools import matchers
+
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+
+
+class DsUtilTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DsUtilTestCase, self).setUp()
+ self.session = fake.FakeSession()
+ self.flags(api_retry_count=1, group='vmware')
+ fake.reset()
+
+ def tearDown(self):
+ super(DsUtilTestCase, self).tearDown()
+ fake.reset()
+
+ def test_file_delete(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('DeleteDatastoreFile_Task', method)
+ name = kwargs.get('name')
+ self.assertEqual('[ds] fake/path', name)
+ datacenter = kwargs.get('datacenter')
+ self.assertEqual('fake-dc-ref', datacenter)
+ return 'fake_delete_task'
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_wait_for_task'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ ds_util.file_delete(self.session,
+ ds_path, 'fake-dc-ref')
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_delete_task')])
+
+ def test_file_move(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('MoveDatastoreFile_Task', method)
+ sourceName = kwargs.get('sourceName')
+ self.assertEqual('[ds] tmp/src', sourceName)
+ destinationName = kwargs.get('destinationName')
+ self.assertEqual('[ds] base/dst', destinationName)
+ sourceDatacenter = kwargs.get('sourceDatacenter')
+ self.assertEqual('fake-dc-ref', sourceDatacenter)
+ destinationDatacenter = kwargs.get('destinationDatacenter')
+ self.assertEqual('fake-dc-ref', destinationDatacenter)
+ return 'fake_move_task'
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_wait_for_task'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
+ dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
+ ds_util.file_move(self.session,
+ 'fake-dc-ref', src_ds_path, dst_ds_path)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_move_task')])
+
+ def test_mkdir(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('MakeDirectory', method)
+ name = kwargs.get('name')
+ self.assertEqual('[ds] fake/path', name)
+ datacenter = kwargs.get('datacenter')
+ self.assertEqual('fake-dc-ref', datacenter)
+ createParentDirectories = kwargs.get('createParentDirectories')
+ self.assertTrue(createParentDirectories)
+
+ with mock.patch.object(self.session, '_call_method',
+ fake_call_method):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
+
+ def test_file_exists(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'SearchDatastore_Task':
+ ds_browser = args[0]
+ self.assertEqual('fake-browser', ds_browser)
+ datastorePath = kwargs.get('datastorePath')
+ self.assertEqual('[ds] fake/path', datastorePath)
+ return 'fake_exists_task'
+
+ # Should never get here
+ self.fail()
+
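+            # Build a task_info that mimics a successful SearchDatastore_Task
+            # result reporting 'fake-file' at the queried path.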
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake_exists_task':
+ result_file = fake.DataObject()
+ result_file.path = 'fake-file'
+
+ result = fake.DataObject()
+ result.file = [result_file]
+ result.path = '[ds] fake/path'
+
+ task_info = fake.DataObject()
+ task_info.result = result
+
+ return task_info
+
+ # Should never get here
+ self.fail()
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(self.session, '_wait_for_task',
+ fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ file_exists = ds_util.file_exists(self.session,
+ 'fake-browser', ds_path, 'fake-file')
+ self.assertTrue(file_exists)
+
+ def test_file_exists_fails(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'SearchDatastore_Task':
+ return 'fake_exists_task'
+
+ # Should never get here
+ self.fail()
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake_exists_task':
+ raise vexc.FileNotFoundException()
+
+ # Should never get here
+ self.fail()
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(self.session, '_wait_for_task',
+ fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ file_exists = ds_util.file_exists(self.session,
+ 'fake-browser', ds_path, 'fake-file')
+ self.assertFalse(file_exists)
+
+ def _mock_get_datastore_calls(self, *datastores):
+ """Mock vim_util calls made by get_datastore."""
+
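+        # A one-element list lets the nested fake_call_method below swap in a
+        # new iterator; Python 2 closures cannot rebind an outer local.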
+ datastores_i = [None]
+
+ # For the moment, at least, this list of datastores is simply passed to
+ # get_properties_for_a_collection_of_objects, which we mock below. We
+ # don't need to over-complicate the fake function by worrying about its
+ # contents.
+ fake_ds_list = ['fake-ds']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ # Mock the call which returns a list of datastores for the cluster
+ if (module == ds_util.vim_util and
+ method == 'get_dynamic_property' and
+ args == ('fake-cluster', 'ClusterComputeResource',
+ 'datastore')):
+ fake_ds_mor = fake.DataObject()
+ fake_ds_mor.ManagedObjectReference = fake_ds_list
+ return fake_ds_mor
+
+ # Return the datastore result sets we were passed in, in the order
+ # given
+ if (module == ds_util.vim_util and
+ method == 'get_properties_for_a_collection_of_objects' and
+ args[0] == 'Datastore' and
+ args[1] == fake_ds_list):
+ # Start a new iterator over given datastores
+ datastores_i[0] = iter(datastores)
+ return datastores_i[0].next()
+
+ # Continue returning results from the current iterator.
+ if (module == ds_util.vim_util and
+ method == 'continue_to_get_objects'):
+ try:
+ return datastores_i[0].next()
+ except StopIteration:
+ return None
+
+            # Any other vim call indicates that get_datastore's use of vim
+            # has changed
+ self.fail('Unexpected vim call in get_datastore: %s' % method)
+
+ return mock.patch.object(self.session, '_call_method',
+ side_effect=fake_call_method)
+
+ def test_get_datastore(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore())
+ fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
+ False, "normal"))
+ fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
+ True, "inMaintenance"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster')
+ self.assertEqual("fake-ds", result.name)
+ self.assertEqual(units.Ti, result.capacity)
+ self.assertEqual(500 * units.Gi, result.freespace)
+
+ def test_get_datastore_with_regex(self):
+ # Test with a regex that matches with a datastore
+        datastore_valid_regex = re.compile(r"^openstack.*\d$")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_valid_regex)
+ self.assertEqual("openstack-ds0", result.name)
+
+ def test_get_datastore_with_token(self):
+        regex = re.compile(r"^ds.*\d$")
+ fake0 = fake.FakeRetrieveResult()
+ fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
+ fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
+ setattr(fake0, 'token', 'token-0')
+ fake1 = fake.FakeRetrieveResult()
+ fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
+ fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
+
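+        # fake0 carries a 'token' attribute, so get_datastore is expected to
+        # fetch the second result set (fake1) via continue_to_get_objects
+        # before choosing a match.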
+ with self._mock_get_datastore_calls(fake0, fake1):
+ result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
+ self.assertEqual("ds2", result.name)
+
+ def test_get_datastore_with_list(self):
+ # Test with a regex containing whitelist of datastores
+ datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("openstack-ds1"))
+ fake_objects.add_object(fake.Datastore("openstack-ds2"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_valid_regex)
+ self.assertNotEqual("openstack-ds1", result.name)
+
+ def test_get_datastore_with_regex_error(self):
+ # Test with a regex that has no match
+ # Checks if code raises DatastoreNotFound with a specific message
+ datastore_invalid_regex = re.compile("unknown-ds")
+ exp_message = (_("Datastore regex %s did not match any datastores")
+ % datastore_invalid_regex.pattern)
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+        # assertRaisesRegExp would have been a better choice than this
+        # try/except block, but it is only available from Python 2.7.
+ try:
+ with self._mock_get_datastore_calls(fake_objects):
+ ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_invalid_regex)
+ except exception.DatastoreNotFound as e:
+ self.assertEqual(exp_message, e.args[0])
+ else:
+ self.fail("DatastoreNotFound Exception was not raised with "
+ "message: %s" % exp_message)
+
+ def test_get_datastore_without_datastore(self):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
+
+ def test_get_datastore_inaccessible_ds(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.accessible", False)
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ with self._mock_get_datastore_calls(fake_objects):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def test_get_datastore_ds_in_maintenance(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.maintenanceMode", "inMaintenance")
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ with self._mock_get_datastore_calls(fake_objects):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def test_get_datastore_no_host_in_cluster(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ return ''
+
+ with mock.patch.object(self.session, '_call_method',
+ fake_call_method):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def _test_is_datastore_valid(self, accessible=True,
+ maintenance_mode="normal",
+ type="VMFS",
+ datastore_regex=None):
+ propdict = {}
+ propdict["summary.accessible"] = accessible
+ propdict["summary.maintenanceMode"] = maintenance_mode
+ propdict["summary.type"] = type
+ propdict["summary.name"] = "ds-1"
+
+ return ds_util._is_datastore_valid(propdict, datastore_regex)
+
+ def test_is_datastore_valid(self):
+ for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ ds_type))
+
+ def test_is_datastore_valid_inaccessible_ds(self):
+ self.assertFalse(self._test_is_datastore_valid(False,
+ "normal",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_in_maintenance(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "inMaintenance",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_type_invalid(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "vfat"))
+
+ def test_is_datastore_valid_not_matching_regex(self):
+ datastore_regex = re.compile("ds-2")
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
+ def test_is_datastore_valid_matching_regex(self):
+ datastore_regex = re.compile("ds-1")
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
+
+class DatastoreTestCase(test.NoDBTestCase):
+ def test_ds(self):
+ ds = ds_util.Datastore(
+ "fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
+ self.assertEqual('ds_name', ds.name)
+ self.assertEqual('fake_ref', ds.ref)
+ self.assertEqual(2 * units.Gi, ds.capacity)
+ self.assertEqual(1 * units.Gi, ds.freespace)
+
+ def test_ds_invalid_space(self):
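+        # Freespace larger than capacity, or freespace without a capacity,
+        # must be rejected.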
+ self.assertRaises(ValueError, ds_util.Datastore,
+ "fake_ref", "ds_name", 1 * units.Gi, 2 * units.Gi)
+ self.assertRaises(ValueError, ds_util.Datastore,
+ "fake_ref", "ds_name", None, 2 * units.Gi)
+
+ def test_ds_no_capacity_no_freespace(self):
+ ds = ds_util.Datastore("fake_ref", "ds_name")
+ self.assertIsNone(ds.capacity)
+ self.assertIsNone(ds.freespace)
+
+ def test_ds_invalid(self):
+ self.assertRaises(ValueError, ds_util.Datastore, None, "ds_name")
+ self.assertRaises(ValueError, ds_util.Datastore, "fake_ref", None)
+
+ def test_build_path(self):
+ ds = ds_util.Datastore("fake_ref", "ds_name")
+ ds_path = ds.build_path("some_dir", "foo.vmdk")
+ self.assertEqual('[ds_name] some_dir/foo.vmdk', str(ds_path))
+
+
+class DatastorePathTestCase(test.NoDBTestCase):
+
+ def test_ds_path(self):
+ p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
+ self.assertEqual('[dsname] a/b/c/file.iso', str(p))
+ self.assertEqual('a/b/c/file.iso', p.rel_path)
+ self.assertEqual('a/b/c', p.parent.rel_path)
+ self.assertEqual('[dsname] a/b/c', str(p.parent))
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('file.iso', p.basename)
+ self.assertEqual('a/b/c', p.dirname)
+
+ def test_ds_path_no_ds_name(self):
+ bad_args = [
+ ('', ['a/b/c', 'file.iso']),
+ (None, ['a/b/c', 'file.iso'])]
+ for t in bad_args:
+ self.assertRaises(
+ ValueError, ds_util.DatastorePath,
+ t[0], *t[1])
+
+ def test_ds_path_invalid_path_components(self):
+ bad_args = [
+ ('dsname', [None]),
+ ('dsname', ['', None]),
+ ('dsname', ['a', None]),
+ ('dsname', ['a', None, 'b']),
+ ('dsname', [None, '']),
+ ('dsname', [None, 'b'])]
+
+ for t in bad_args:
+ self.assertRaises(
+ ValueError, ds_util.DatastorePath,
+ t[0], *t[1])
+
+ def test_ds_path_no_subdir(self):
+ args = [
+ ('dsname', ['', 'x.vmdk']),
+ ('dsname', ['x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
+ self.assertEqual('[dsname] x.vmdk', str(canonical_p))
+ self.assertEqual('', canonical_p.dirname)
+ self.assertEqual('x.vmdk', canonical_p.basename)
+ self.assertEqual('x.vmdk', canonical_p.rel_path)
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+
+ def test_ds_path_ds_only(self):
+ args = [
+ ('dsname', []),
+ ('dsname', ['']),
+ ('dsname', ['', ''])]
+
+ canonical_p = ds_util.DatastorePath('dsname')
+ self.assertEqual('[dsname]', str(canonical_p))
+ self.assertEqual('', canonical_p.rel_path)
+ self.assertEqual('', canonical_p.basename)
+ self.assertEqual('', canonical_p.dirname)
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+ self.assertEqual(canonical_p.rel_path, p.rel_path)
+
+ def test_ds_path_equivalence(self):
+ args = [
+ ('dsname', ['a/b/c/', 'x.vmdk']),
+ ('dsname', ['a/', 'b/c/', 'x.vmdk']),
+ ('dsname', ['a', 'b', 'c', 'x.vmdk']),
+ ('dsname', ['a/b/c', 'x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+ self.assertEqual(canonical_p.datastore, p.datastore)
+ self.assertEqual(canonical_p.rel_path, p.rel_path)
+ self.assertEqual(str(canonical_p.parent), str(p.parent))
+
+ def test_ds_path_non_equivalence(self):
+ args = [
+ # leading slash
+ ('dsname', ['/a', 'b', 'c', 'x.vmdk']),
+ ('dsname', ['/a/b/c/', 'x.vmdk']),
+ ('dsname', ['a/b/c', '/x.vmdk']),
+ # leading space
+ ('dsname', ['a/b/c/', ' x.vmdk']),
+ ('dsname', ['a/', ' b/c/', 'x.vmdk']),
+ ('dsname', [' a', 'b', 'c', 'x.vmdk']),
+ # trailing space
+ ('dsname', ['/a/b/c/', 'x.vmdk ']),
+ ('dsname', ['a/b/c/ ', 'x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertNotEqual(str(canonical_p), str(p))
+
+ def test_ds_path_hashable(self):
+ ds1 = ds_util.DatastorePath('dsname', 'path')
+ ds2 = ds_util.DatastorePath('dsname', 'path')
+
+ # If the above objects have the same hash, they will only be added to
+ # the set once
+ self.assertThat(set([ds1, ds2]), matchers.HasLength(1))
+
+ def test_equal(self):
+ a = ds_util.DatastorePath('ds_name', 'a')
+ b = ds_util.DatastorePath('ds_name', 'a')
+ self.assertEqual(a, b)
+
+ def test_join(self):
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join('b')
+ self.assertEqual('[ds_name] a/b', str(ds_path))
+
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join()
+ self.assertEqual('[ds_name] a', str(ds_path))
+
+ bad_args = [
+ [None],
+ ['', None],
+ ['a', None],
+ ['a', None, 'b']]
+ for arg in bad_args:
+ self.assertRaises(ValueError, p.join, *arg)
+
+ def test_ds_path_parse(self):
+ p = ds_util.DatastorePath.parse('[dsname]')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('', p.rel_path)
+
+ p = ds_util.DatastorePath.parse('[dsname] folder')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('folder', p.rel_path)
+
+ p = ds_util.DatastorePath.parse('[dsname] folder/file')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('folder/file', p.rel_path)
+
+ for p in [None, '']:
+ self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)
+
+ for p in ['bad path', '/a/b/c', 'a/b/c']:
+ self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
new file mode 100644
index 0000000000..1351530143
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
@@ -0,0 +1,163 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import re
+
+from oslo.utils import units
+
+from nova import test
+from nova.virt.vmwareapi import ds_util
+
+ResultSet = collections.namedtuple('ResultSet', ['objects'])
+ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
+DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
+
+
+class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp()
+ self.data = [
+ ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789],
+ ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
+ ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
+ ['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789],
+ ['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789]
+ ]
+
+ def build_result_set(self, mock_data, name_list=None):
+        # Datastores are given moref values of ds-000, ds-001, and so on,
+        # based on their index in the mock_data list.
+ if name_list is None:
+ name_list = self.propset_name_list
+
+ objects = []
+        for ds_id, row in enumerate(mock_data):
+            obj = ObjectContent(
+                obj=MoRef(value="ds-%03d" % ds_id),
+                propSet=[])
+            for index, value in enumerate(row):
+                obj.propSet.append(
+                    DynamicProperty(name=name_list[index], val=value))
+ objects.append(obj)
+ return ResultSet(objects=objects)
+
+ @property
+ def propset_name_list(self):
+ return ['summary.type', 'summary.name', 'summary.accessible',
+ 'summary.maintenanceMode', 'summary.capacity',
+ 'summary.freeSpace']
+
+ def test_filter_datastores_simple(self):
+ datastores = self.build_result_set(self.data)
+ best_match = ds_util.Datastore(ref='fake_ref', name='ds',
+ capacity=0, freespace=0)
+ rec = ds_util._select_datastore(datastores, best_match)
+
+ self.assertIsNotNone(rec.ref, "could not find datastore!")
+ self.assertEqual('ds-001', rec.ref.value,
+ "didn't find the right datastore!")
+ self.assertEqual(123467890, rec.freespace,
+ "did not obtain correct freespace!")
+
+ def test_filter_datastores_empty(self):
+ data = []
+ datastores = self.build_result_set(data)
+
+ best_match = ds_util.Datastore(ref='fake_ref', name='ds',
+ capacity=0, freespace=0)
+ rec = ds_util._select_datastore(datastores, best_match)
+
+ self.assertEqual(rec, best_match)
+
+ def test_filter_datastores_no_match(self):
+ datastores = self.build_result_set(self.data)
+ datastore_regex = re.compile('no_match.*')
+
+ best_match = ds_util.Datastore(ref='fake_ref', name='ds',
+ capacity=0, freespace=0)
+ rec = ds_util._select_datastore(datastores,
+ best_match,
+ datastore_regex)
+
+ self.assertEqual(rec, best_match, "did not match datastore properly")
+
+ def test_filter_datastores_specific_match(self):
+
+ data = [
+ ['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678],
+ ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
+ ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
+ ['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789],
+ ['VMFS', 'some-other-good', False, 'normal', 987654321000,
+ 12346789000],
+ ['VMFS', 'new-name', True, 'inMaintenance', 987654321000,
+ 12346789000]
+ ]
+ # only the DS some-name-good is accessible and matches the regex
+ datastores = self.build_result_set(data)
+ datastore_regex = re.compile('.*-good$')
+
+ best_match = ds_util.Datastore(ref='fake_ref', name='ds',
+ capacity=0, freespace=0)
+ rec = ds_util._select_datastore(datastores,
+ best_match,
+ datastore_regex)
+
+ self.assertIsNotNone(rec, "could not find datastore!")
+ self.assertEqual('ds-003', rec.ref.value,
+ "didn't find the right datastore!")
+ self.assertNotEqual('ds-004', rec.ref.value,
+ "accepted an unreachable datastore!")
+ self.assertEqual('some-name-good', rec.name)
+ self.assertEqual(12346789, rec.freespace,
+ "did not obtain correct freespace!")
+ self.assertEqual(987654321, rec.capacity,
+ "did not obtain correct capacity!")
+
+ def test_filter_datastores_missing_props(self):
+ data = [
+ ['VMFS', 'os-some-name', 987654321, 1234678],
+ ['NFS', 'another-name', 9876543210, 123467890],
+ ]
+ # no matches are expected when 'summary.accessible' is missing
+ prop_names = ['summary.type', 'summary.name',
+ 'summary.capacity', 'summary.freeSpace']
+ datastores = self.build_result_set(data, prop_names)
+ best_match = ds_util.Datastore(ref='fake_ref', name='ds',
+ capacity=0, freespace=0)
+
+ rec = ds_util._select_datastore(datastores, best_match)
+ self.assertEqual(rec, best_match, "no matches were expected")
+
+ def test_filter_datastores_best_match(self):
+ data = [
+ ['VMFS', 'spam-good', True, 20 * units.Gi, 10 * units.Gi],
+ ['NFS', 'eggs-good', True, 40 * units.Gi, 15 * units.Gi],
+ ['BAD', 'some-name-bad', True, 30 * units.Gi, 20 * units.Gi],
+ ['VMFS', 'some-name-good', True, 50 * units.Gi, 5 * units.Gi],
+ ['VMFS', 'some-other-good', True, 10 * units.Gi, 10 * units.Gi],
+ ]
+
+ datastores = self.build_result_set(data)
+ datastore_regex = re.compile('.*-good$')
+
+ # the current best match is better than all candidates
+ best_match = ds_util.Datastore(ref='ds-100', name='best-ds-good',
+ capacity=20 * units.Gi, freespace=19 * units.Gi)
+ rec = ds_util._select_datastore(datastores,
+ best_match,
+ datastore_regex)
+ self.assertEqual(rec, best_match, "did not match datastore properly")
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
new file mode 100644
index 0000000000..d277963106
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import datetime
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vmops
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(ImageCacheManagerTestCase, self).setUp()
+ self._session = mock.Mock(name='session')
+ self._imagecache = imagecache.ImageCacheManager(self._session,
+ 'fake-base-folder')
+ self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ self._file_name = 'ts-2012-11-22-12-00-00'
+ fake.reset()
+
+ def tearDown(self):
+ super(ImageCacheManagerTestCase, self).tearDown()
+ fake.reset()
+
+ def test_timestamp_cleanup(self):
+ def fake_get_timestamp(ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if not self.exists:
+ return
+ ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
+ timeutils.strtime(at=self._time,
+ fmt=imagecache.TIMESTAMP_FORMAT))
+ return ts
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'file_delete')
+ ) as (_get_timestamp, _file_delete):
+ self.exists = False
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(0, _file_delete.call_count)
+ self.exists = True
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ expected_ds_path = ds_util.DatastorePath(
+ 'fake-ds', 'fake-path', self._file_name)
+ _file_delete.assert_called_once_with(self._session,
+ expected_ds_path, 'fake-dc-ref')
+
+ def test_get_timestamp(self):
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if self.exists:
+ files = set()
+ files.add(self._file_name)
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ):
+ self.exists = True
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(self._file_name, ts)
+ self.exists = False
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertIsNone(ts)
+
+ def test_get_timestamp_filename(self):
+ timeutils.set_time_override(override_time=self._time)
+ fn = self._imagecache._get_timestamp_filename()
+ self.assertEqual(self._file_name, fn)
+
+ def test_get_datetime_from_filename(self):
+ t = self._imagecache._get_datetime_from_filename(self._file_name)
+ self.assertEqual(self._time, t)
+
+ def test_get_ds_browser(self):
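+        # The browser returned by get_dynamic_property should be cached
+        # under the datastore's moref value after the first lookup.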
+ cache = self._imagecache._ds_browser
+ ds_browser = mock.Mock()
+ moref = fake.ManagedObjectReference('datastore-100')
+ self.assertIsNone(cache.get(moref.value))
+ mock_get_method = mock.Mock(return_value=ds_browser)
+ with mock.patch.object(vim_util, 'get_dynamic_property',
+ mock_get_method):
+ ret = self._imagecache._get_ds_browser(moref)
+ mock_get_method.assert_called_once_with(mock.ANY, moref,
+ 'Datastore', 'browser')
+ self.assertIs(ds_browser, ret)
+ self.assertIs(ds_browser, cache.get(moref.value))
+
+ def test_list_base_images(self):
+ def fake_get_dynamic_property(vim, mobj, type, property_name):
+ return 'fake-ds-browser'
+
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ files = set()
+ files.add('image-ref-uuid')
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(vim_util, 'get_dynamic_property',
+ fake_get_dynamic_property),
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ) as (_get_dynamic, _get_sub_folders):
+ fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
+ datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref)
+ ds_path = datastore.build_path('base_folder')
+ images = self._imagecache._list_datastore_images(
+ ds_path, datastore)
+ originals = set()
+ originals.add('image-ref-uuid')
+ self.assertEqual({'originals': originals,
+ 'unexplained_images': []},
+ images)
+
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
+ @mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
+ def test_enlist_image(self,
+ mock_get_ds_browser,
+ mock_timestamp_cleanup,
+ mock_timestamp_folder_get):
+ image_id = "fake_image_id"
+ dc_ref = "fake_dc_ref"
+ fake_ds_ref = mock.Mock()
+ ds = ds_util.Datastore(
+ ref=fake_ds_ref, name='fake_ds',
+ capacity=1,
+ freespace=1)
+
+ ds_browser = mock.Mock()
+ mock_get_ds_browser.return_value = ds_browser
+ timestamp_folder_path = mock.Mock()
+ mock_timestamp_folder_get.return_value = timestamp_folder_path
+
+ self._imagecache.enlist_image(image_id, ds, dc_ref)
+
+ cache_root_folder = ds.build_path("fake-base-folder")
+ mock_get_ds_browser.assert_called_once_with(
+ ds.ref)
+ mock_timestamp_folder_get.assert_called_once_with(
+ cache_root_folder, "fake_image_id")
+ mock_timestamp_cleanup.assert_called_once_with(
+ dc_ref, ds_browser, timestamp_folder_path)
+
+ def test_age_cached_images(self):
+ def fake_get_ds_browser(ds_ref):
+ return 'fake-ds-browser'
+
+ def fake_get_timestamp(ds_browser, ds_path):
+ self._get_timestamp_called += 1
+ path = str(ds_path)
+ if path == '[fake-ds] fake-path/fake-image-1':
+                # No timestamp exists
+ return
+ if path == '[fake-ds] fake-path/fake-image-2':
+ # Timestamp that will be valid => no deletion
+ return 'ts-2012-11-22-10-00-00'
+ if path == '[fake-ds] fake-path/fake-image-3':
+ # Timestamp that will be invalid => deletion
+ return 'ts-2012-11-20-12-00-00'
+ self.fail()
+
+ def fake_mkdir(session, ts_path, dc_ref):
+ self.assertEqual(
+ '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
+ str(ts_path))
+
+ def fake_file_delete(session, ds_path, dc_ref):
+ self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
+
+ def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
+ self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_ds_browser',
+ fake_get_ds_browser),
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'mkdir',
+ fake_mkdir),
+ mock.patch.object(ds_util, 'file_delete',
+ fake_file_delete),
+ mock.patch.object(self._imagecache, 'timestamp_cleanup',
+ fake_timestamp_cleanup),
+ ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
+ _timestamp_cleanup):
+ timeutils.set_time_override(override_time=self._time)
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ self._get_timestamp_called = 0
+ self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
+ 'fake-image-3', 'fake-image-4'])
+ self._imagecache.used_images = set(['fake-image-4'])
+ self._imagecache._age_cached_images(
+ 'fake-context', datastore, dc_info,
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(3, self._get_timestamp_called)
+
+ def test_update(self):
+ def fake_list_datastore_images(ds_path, datastore):
+ return {'unexplained_images': [],
+ 'originals': self.images}
+
+ def fake_age_cached_images(context, datastore,
+ dc_info, ds_path):
+ self.assertEqual('[ds] fake-base-folder', str(ds_path))
+ self.assertEqual(self.images,
+ self._imagecache.used_images)
+ self.assertEqual(self.images,
+ self._imagecache.originals)
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_list_datastore_images',
+ fake_list_datastore_images),
+ mock.patch.object(self._imagecache,
+ '_age_cached_images',
+ fake_age_cached_images)
+ ) as (_list_base, _age_and_verify):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'inst-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'name': 'inst-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+ self.images = set(['1', '2'])
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ datastores_info = [(datastore, dc_info)]
+ self._imagecache.update('context', all_instances, datastores_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
new file mode 100644
index 0000000000..07fc3be214
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2014 VMware, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for images.
+"""
+
+import contextlib
+
+import mock
+from oslo.utils import units
+
+from nova import exception
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import read_write_util
+
+
+class VMwareImagesTestCase(test.NoDBTestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ def test_fetch_image(self):
+ """Test fetching images."""
+
+ dc_name = 'fake-dc'
+ file_path = 'fake_file'
+ ds_name = 'ds1'
+ host = mock.MagicMock()
+ context = mock.MagicMock()
+
+ image_data = {
+ 'id': nova.tests.unit.image.fake.get_valid_image_id(),
+ 'disk_format': 'vmdk',
+ 'size': 512,
+ }
+ read_file_handle = mock.MagicMock()
+ write_file_handle = mock.MagicMock()
+ read_iter = mock.MagicMock()
+ instance = {}
+ instance['image_ref'] = image_data['id']
+ instance['uuid'] = 'fake-uuid'
+
+ def fake_read_handle(read_iter):
+ return read_file_handle
+
+ def fake_write_handle(host, dc_name, ds_name, cookies,
+ file_path, file_size):
+ return write_file_handle
+
+ with contextlib.nested(
+ mock.patch.object(read_write_util, 'GlanceFileRead',
+ side_effect=fake_read_handle),
+ mock.patch.object(read_write_util, 'VMwareHTTPWriteFile',
+ side_effect=fake_write_handle),
+ mock.patch.object(images, 'start_transfer'),
+ mock.patch.object(images.IMAGE_API, 'get',
+ return_value=image_data),
+ mock.patch.object(images.IMAGE_API, 'download',
+ return_value=read_iter),
+ ) as (glance_read, http_write, start_transfer, image_show,
+ image_download):
+ images.fetch_image(context, instance,
+ host, dc_name,
+ ds_name, file_path)
+
+ glance_read.assert_called_once_with(read_iter)
+ http_write.assert_called_once_with(host, dc_name, ds_name, None,
+ file_path, image_data['size'])
+ start_transfer.assert_called_once_with(
+ context, read_file_handle,
+ image_data['size'],
+ write_file_handle=write_file_handle)
+ image_download.assert_called_once_with(context, instance['image_ref'])
+ image_show.assert_called_once_with(context, instance['image_ref'])
+
+ def _setup_mock_get_remote_image_service(self,
+ mock_get_remote_image_service,
+ metadata):
+ mock_image_service = mock.MagicMock()
+ mock_image_service.show.return_value = metadata
+ mock_get_remote_image_service.return_value = [mock_image_service, 'i']
+
+ def test_from_image_with_image_ref(self):
+ raw_disk_size_in_gb = 83
+ raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': 'vmdk',
+ 'properties': {
+ "vmware_ostype": constants.DEFAULT_OS_TYPE,
+ "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
+ "vmware_disktype": constants.DEFAULT_DISK_TYPE,
+ "hw_vif_model": constants.DEFAULT_VIF_MODEL,
+ images.LINKED_CLONE_PROPERTY: True}}
+
+ img_props = images.VMwareImage.from_image(image_id, mdata)
+
+ image_size_in_kb = raw_disk_size_in_bytes / units.Ki
+
+ # assert that defaults are set and no value returned is left empty
+ self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
+ self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
+ img_props.adapter_type)
+ self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
+ self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
+ self.assertTrue(img_props.linked_clone)
+ self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)
+
+ def _image_build(self, image_lc_setting, global_lc_setting,
+ disk_format=constants.DEFAULT_DISK_FORMAT,
+ os_type=constants.DEFAULT_OS_TYPE,
+ adapter_type=constants.DEFAULT_ADAPTER_TYPE,
+ disk_type=constants.DEFAULT_DISK_TYPE,
+ vif_model=constants.DEFAULT_VIF_MODEL):
+ self.flags(use_linked_clone=global_lc_setting, group='vmware')
+ raw_disk_size_in_gb = 93
+        raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+
+        image_id = nova.tests.unit.image.fake.get_valid_image_id()
+        mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': disk_format,
+ 'properties': {
+ "vmware_ostype": os_type,
+ "vmware_adaptertype": adapter_type,
+ "vmware_disktype": disk_type,
+ "hw_vif_model": vif_model}}
+
+ if image_lc_setting is not None:
+ mdata['properties'][
+ images.LINKED_CLONE_PROPERTY] = image_lc_setting
+
+ return images.VMwareImage.from_image(image_id, mdata)
+
+ def test_use_linked_clone_override_nf(self):
+ image_props = self._image_build(None, False)
+ self.assertFalse(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_nt(self):
+ image_props = self._image_build(None, True)
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ny(self):
+ image_props = self._image_build(None, "yes")
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ft(self):
+ image_props = self._image_build(False, True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_nt(self):
+ image_props = self._image_build("no", True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_yf(self):
+ image_props = self._image_build("yes", False)
+ self.assertTrue(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_disk_format_none(self):
+ image = self._image_build(None, True, disk_format=None)
+ self.assertIsNone(image.file_type)
+ self.assertFalse(image.is_iso)
+
+ def test_use_disk_format_iso(self):
+ image = self._image_build(None, True, disk_format='iso')
+ self.assertEqual('iso', image.file_type)
+ self.assertTrue(image.is_iso)
+
+ def test_use_bad_disk_format(self):
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._image_build,
+ None,
+ True,
+ disk_format='bad_disk_format')
+
+ def test_image_no_defaults(self):
+ image = self._image_build(False, False,
+ disk_format='iso',
+ os_type='fake-os-type',
+ adapter_type='fake-adapter-type',
+ disk_type='fake-disk-type',
+ vif_model='fake-vif-model')
+ self.assertEqual('iso', image.file_type)
+ self.assertEqual('fake-os-type', image.os_type)
+ self.assertEqual('fake-adapter-type', image.adapter_type)
+ self.assertEqual('fake-disk-type', image.disk_type)
+ self.assertEqual('fake-vif-model', image.vif_model)
+ self.assertFalse(image.linked_clone)
+
+ def test_image_defaults(self):
+ image = images.VMwareImage(image_id='fake-image-id')
+
+        # N.B. We intentionally don't use the defined constants here. Amongst
+ # other potential failures, we're interested in changes to their
+ # values, which would not otherwise be picked up.
+ self.assertEqual('otherGuest', image.os_type)
+ self.assertEqual('lsiLogic', image.adapter_type)
+ self.assertEqual('preallocated', image.disk_type)
+ self.assertEqual('e1000', image.vif_model)
diff --git a/nova/tests/unit/virt/vmwareapi/test_io_util.py b/nova/tests/unit/virt/vmwareapi/test_io_util.py
new file mode 100644
index 0000000000..a03c1e95b5
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_io_util.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.vmwareapi import io_util
+
+
+@mock.patch.object(io_util, 'IMAGE_API')
+class GlanceWriteThreadTestCase(test.NoDBTestCase):
+
+ def test_start_image_update_service_exception(self, mocked):
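+        # The stubbed image API raises ImageNotAuthorized on update; the
+        # error should surface to the caller via wait() rather than being
+        # swallowed by the write thread.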
+ mocked.update.side_effect = exception.ImageNotAuthorized(
+ image_id='image')
+ write_thread = io_util.GlanceWriteThread(
+ None, None, image_id=None)
+ write_thread.start()
+ self.assertRaises(exception.ImageNotAuthorized, write_thread.wait)
+ write_thread.stop()
+ write_thread.close()
diff --git a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
new file mode 100644
index 0000000000..468d8b213a
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
@@ -0,0 +1,39 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib
+
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.vmwareapi import read_write_util
+
+CONF = cfg.CONF
+
+
+class ReadWriteUtilTestCase(test.NoDBTestCase):
+ def test_ipv6_host(self):
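+        # Stub out endheaders so no real connection is attempted; the test
+        # only checks that the IPv6 host and the default HTTPS port (443)
+        # end up on the connection object.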
+ ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c'
+ self.mox.StubOutWithMock(httplib.HTTPConnection, 'endheaders')
+ httplib.HTTPConnection.endheaders()
+ self.mox.ReplayAll()
+        file_handle = read_write_util.VMwareHTTPWriteFile(ipv6_host,
+                                                          'fake_dc',
+                                                          'fake_ds',
+                                                          dict(),
+                                                          '/tmp/fake.txt',
+                                                          0)
+        self.assertEqual(ipv6_host, file_handle.conn.host)
+        self.assertEqual(443, file_handle.conn.port)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
new file mode 100644
index 0000000000..2a4d086c36
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -0,0 +1,346 @@
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+from oslo.vmware import exceptions as vexc
+
+from nova import exception
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+CONF = cfg.CONF
+
+
+class VMwareVifTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVifTestCase, self).setUp()
+ self.flags(vlan_interface='vmnet0', group='vmware')
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True)
+
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ self.session = fake.FakeSession()
+ self.cluster = None
+
+ def tearDown(self):
+ super(VMwareVifTestCase, self).tearDown()
+
+ def test_ensure_vlan_bridge(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
+
+    # FlatDHCP network mode without vlan - the network does not exist on the
+    # host
+ def test_ensure_vlan_bridge_without_vlan(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+    # FlatDHCP network mode without vlan - the network already exists on the
+    # host, so neither get_vswitch_for_vlan_interface nor
+    # check_if_vlan_interface_exists should be called
+ def test_ensure_vlan_bridge_with_network(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ vm_network = {'name': 'VM Network', 'type': 'Network'}
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(vm_network)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+ # Flat network mode with DVS
+ def test_ensure_vlan_bridge_with_existing_dvs(self):
+ network_ref = {'dvpg': 'dvportgroup-2062',
+ 'type': 'DistributedVirtualPortgroup'}
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(network_ref)
+ self.mox.ReplayAll()
+ ref = vif.ensure_vlan_bridge(self.session,
+ self.vif,
+ create_vlan=False)
+ self.assertThat(ref, matchers.DictMatches(network_ref))
+
+ def test_get_network_ref_neutron(self):
+ self.mox.StubOutWithMock(vif, 'get_neutron_network')
+ vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, True)
+
+ def test_get_network_ref_flat_dhcp(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=False)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=True)
+ self.mox.ReplayAll()
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True,
+ should_create_vlan=True)
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('bridge_id', network_ref['network-id'])
+
+ def test_get_network_ref_multiple_bridges_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id2')
+ self.assertEqual('bridge_id2', network_ref['network-id'])
+
+ def test_get_network_ref_integration(self):
+ opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('integration_bridge', network_ref['network-id'])
+
+ def test_get_network_ref_bridge_none(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_network_ref_integration_multiple(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_neutron_network(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn('fake-network-ref')
+ self.mox.ReplayAll()
+ network_ref = vif.get_neutron_network(self.session,
+ self.vif['network']['id'],
+ self.cluster,
+ self.vif)
+ self.assertEqual(network_ref, 'fake-network-ref')
+
+ def test_get_neutron_network_opaque_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_get_neutron_network_bridge_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(None)
+ network_util.get_network_with_the_name(self.session, 0,
+ self.cluster).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_create_port_group_already_exists(self):
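+        # An AlreadyExistsException from AddPortGroup should not propagate;
+        # an existing port group is not treated as an error here.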
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.AlreadyExistsException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ network_util.create_port_group(self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_create_port_group_exception(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.VMwareDriverException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ self.assertRaises(vexc.VMwareDriverException,
+ network_util.create_port_group,
+ self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_get_neutron_network_invalid_property(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'get_dynamic_property':
+ raise vexc.InvalidPropertyException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(network_util, 'get_network_with_the_name')
+ ) as (_get_host, _call_method, _get_name):
+ vif.get_neutron_network(self.session, 'network_name',
+ 'cluster', self.vif)
+
+ def test_get_vif_info_none(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', None)
+ self.assertEqual([], vif_info)
+
+ def test_get_vif_info_empty_list(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', [])
+ self.assertEqual([], vif_info)
+
+ @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
+ def test_get_vif_info(self, mock_get_network_ref):
+ network_info = utils.get_test_network_info()
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', network_info)
+ expected = [{'iface_id': 'vif-xxx-yyy-zzz',
+ 'mac_address': 'fake',
+ 'network_name': 'fake',
+ 'network_ref': 'fake_ref',
+ 'vif_model': 'fake_model'}]
+ self.assertEqual(expected, vif_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vim_util.py b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
new file mode 100644
index 0000000000..d00e127b66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2013 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+import fixtures
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vim_util
+
+
+def _fake_get_object_properties(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent(None))
+ return fake_objects
+
+
+def _fake_get_object_properties_missing(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ ml = [fake.MissingProperty()]
+ fake_objects.add_object(fake.ObjectContent(None, missing_list=ml))
+ return fake_objects
+
+
+class VMwareVIMUtilTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareVIMUtilTestCase, self).setUp()
+ fake.reset()
+ self.vim = fake.FakeVim()
+ self.vim._login()
+
+ def test_get_dynamic_properties_missing(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_missing_path_exists(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties_missing))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_with_token(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ # Add a token to our results, indicating that more are available
+ result = fake.FakeRetrieveResult(token='fake_token')
+
+ # We expect these properties to be returned
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name1', val='value1'),
+ DynamicProperty(name='name2', val='value2')
+ ]))
+
+ # These properties should be ignored
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name3', val='value3')
+ ]))
+
+ retrievePropertiesEx = mock.MagicMock(name='RetrievePropertiesEx')
+ retrievePropertiesEx.return_value = result
+
+ calls = {'RetrievePropertiesEx': retrievePropertiesEx}
+ with stubs.fake_suds_context(calls):
+ session = driver.VMwareAPISession(host_ip='localhost')
+
+ service_content = session.vim.service_content
+ props = session._call_method(vim_util, "get_dynamic_properties",
+ service_content.propertyCollector,
+ 'fake_type', None)
+
+ self.assertEqual(props, {
+ 'name1': 'value1',
+ 'name2': 'value2'
+ })
+
+ @mock.patch.object(vim_util, 'get_object_properties', return_value=None)
+ def test_get_dynamic_properties_no_objects(self, mock_get_object_props):
+ res = vim_util.get_dynamic_properties('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertEqual({}, res)
+
+ def test_get_inner_objects(self):
+ property = ['summary.name']
+ # Get the fake datastores directly from the cluster
+ cluster_refs = fake._get_object_refs('ClusterComputeResource')
+ cluster = fake._get_object(cluster_refs[0])
+ expected_ds = cluster.datastore.ManagedObjectReference
+        # Get the fake datastores via the get_inner_objects() utility
+ result = vim_util.get_inner_objects(
+ self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
+ datastores = [oc.obj for oc in result.objects]
+ self.assertEqual(expected_ds, datastores)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
new file mode 100644
index 0000000000..906d03cf66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -0,0 +1,1069 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import contextlib
+import re
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import context
+from nova import exception
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vm_util
+
+
+class PartialObject(object):
+ def __init__(self, path='fake-path'):
+ self.path = path
+ self.fault = fake.DataObject()
+
+
+class VMwareVMUtilTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVMUtilTestCase, self).setUp()
+ fake.reset()
+ stubs.set_stubs(self.stubs)
+ vm_util.vm_refs_cache_reset()
+
+ def _test_get_stats_from_cluster(self, connection_state="connected",
+ maintenance_mode=False):
+ ManagedObjectRefs = [fake.ManagedObjectReference("host1",
+ "HostSystem"),
+ fake.ManagedObjectReference("host2",
+ "HostSystem")]
+ hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
+ respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
+ prop_dict = {'host': hosts, 'resourcePool': respool}
+
+ hardware = fake.DataObject()
+ hardware.numCpuCores = 8
+ hardware.numCpuThreads = 16
+ hardware.vendor = "Intel"
+ hardware.cpuModel = "Intel(R) Xeon(R)"
+
+ runtime_host_1 = fake.DataObject()
+ runtime_host_1.connectionState = "connected"
+ runtime_host_1.inMaintenanceMode = False
+
+ runtime_host_2 = fake.DataObject()
+ runtime_host_2.connectionState = connection_state
+ runtime_host_2.inMaintenanceMode = maintenance_mode
+
+ prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_1)]
+ prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_2)]
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_1))
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_2))
+
+ respool_resource_usage = fake.DataObject()
+ respool_resource_usage.maxUsage = 5368709120
+ respool_resource_usage.overallUsage = 2147483648
+
+ def fake_call_method(*args):
+ if "get_dynamic_properties" in args:
+ return prop_dict
+ elif "get_properties_for_a_collection_of_objects" in args:
+ return fake_objects
+ else:
+ return respool_resource_usage
+
+ session = fake.FakeSession()
+ with mock.patch.object(session, '_call_method', fake_call_method):
+ result = vm_util.get_stats_from_cluster(session, "cluster1")
+ cpu_info = {}
+ mem_info = {}
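+            # Each fake host advertises 8 cores / 16 threads; when both
+            # hosts are usable the totals double (32 vcpus, 16 cores),
+            # otherwise only host1 is counted. Memory comes from the
+            # resource pool usage above: maxUsage 5 GiB -> 5120 MB total,
+            # overallUsage 2 GiB -> 3072 MB free.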
+ if connection_state == "connected" and not maintenance_mode:
+ cpu_info['vcpus'] = 32
+ cpu_info['cores'] = 16
+ cpu_info['vendor'] = ["Intel", "Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)",
+ "Intel(R) Xeon(R)"]
+ else:
+ cpu_info['vcpus'] = 16
+ cpu_info['cores'] = 8
+ cpu_info['vendor'] = ["Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)"]
+ mem_info['total'] = 5120
+ mem_info['free'] = 3072
+ expected_stats = {'cpu': cpu_info, 'mem': mem_info}
+ self.assertEqual(expected_stats, result)
+
+ def test_get_stats_from_cluster_hosts_connected_and_active(self):
+ self._test_get_stats_from_cluster()
+
+ def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
+ self._test_get_stats_from_cluster(connection_state="disconnected")
+
+ def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
+ self._test_get_stats_from_cluster(maintenance_mode=True)
+
+ def test_get_host_ref_no_hosts_in_cluster(self):
+ self.assertRaises(exception.NoValidHost,
+ vm_util.get_host_ref,
+ fake.FakeObjectRetrievalSession(""), 'fake_cluster')
+
+ def test_get_resize_spec(self):
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
+ fake_instance)
+ expected = """{'memoryMB': 2048,
+ 'numCPUs': 2,
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_cdrom_attach_config_spec(self):
+
+ result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
+ fake.Datastore(),
+ "/tmp/foo.iso",
+ 200, 0)
+ expected = """{
+ 'deviceChange': [
+ {
+ 'device': {
+ 'connectable': {
+ 'allowGuestControl': False,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0: VirtualDeviceConnectInfo'
+ },
+ 'backing': {
+ 'datastore': {
+ "summary.maintenanceMode": "normal",
+ "summary.type": "VMFS",
+ "summary.accessible":true,
+ "summary.name": "fake-ds",
+ "summary.capacity": 1099511627776,
+ "summary.freeSpace": 536870912000,
+ "browser": ""
+ },
+ 'fileName': '/tmp/foo.iso',
+ 'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
+ },
+ 'controllerKey': 200,
+ 'unitNumber': 0,
+ 'key': -1,
+ 'obj_name': 'ns0: VirtualCdrom'
+ },
+ 'operation': 'add',
+ 'obj_name': 'ns0: VirtualDeviceConfigSpec'
+ }
+ ],
+ 'obj_name': 'ns0: VirtualMachineConfigSpec'
+}
+"""
+
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_lsilogic_controller_spec(self):
+ # Test controller spec returned for lsiLogic sas adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="lsiLogicsas")
+ self.assertEqual("ns0:VirtualLsiLogicSASController",
+ config_spec.device.obj_name)
+
+ def test_paravirtual_controller_spec(self):
+ # Test controller spec returned for paraVirtual adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="paraVirtual")
+ self.assertEqual("ns0:ParaVirtualSCSIController",
+ config_spec.device.obj_name)
+
+ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
+        # Build a disk backed by the given filename and attach it to an
+        # lsiLogic SAS controller for the vmdk path/adapter type tests.
+ controller_key = 1000
+ disk = fake.VirtualDisk()
+ disk.controllerKey = controller_key
+ disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ if parent:
+ disk_backing.parent = parent
+ disk.backing = disk_backing
+ controller = fake.VirtualLsiLogicSASController()
+ controller.key = controller_key
+ devices = [disk, controller]
+ return devices
+
+ def test_get_vmdk_path(self):
+ uuid = '00000000-0000-0000-0000-000000000000'
+ filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ session = fake.FakeSession()
+
+ with mock.patch.object(session, '_call_method',
+ return_value=devices):
+ instance = {'uuid': uuid}
+ vmdk_path = vm_util.get_vmdk_path(session, None, instance)
+ self.assertEqual(filename, vmdk_path)
+
+ def test_get_vmdk_path_and_adapter_type(self):
+ filename = '[test_datastore] test_file.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(vmdk_info[0], filename)
+
+ def test_get_vmdk_path_and_adapter_type_with_match(self):
+ n_filename = '[test_datastore] uuid/uuid.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(n_filename, vmdk_info[0])
+
+ def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
+ n_filename = '[test_datastore] diuu/diuu.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertIsNone(vmdk_info[0])
+
+ def test_get_vmdk_adapter_type(self):
+        # Test the adapter_type to be used in the vmdk descriptor.
+        # The adapter type in the vmdk descriptor is the same for LSI-SAS,
+        # LSILogic and ParaVirtual.
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
+ self.assertEqual("dummyAdapter", vmdk_adapter_type)
+
+ def test_find_allocated_slots(self):
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ disk3 = fake.VirtualDisk(201, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
+ devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
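+        # Expect unit numbers grouped by controller key: IDE 200 has
+        # units 0 and 1 taken, IDE 201 has unit 1, and the SCSI
+        # controller reserves its own unit 7 on key 1000.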
+ taken = vm_util._find_allocated_slots(devices)
+ self.assertEqual([0, 1], sorted(taken[200]))
+ self.assertEqual([1], taken[201])
+ self.assertEqual([7], taken[1000])
+
+ def test_allocate_controller_key_and_unit_number_ide_default(self):
+ # Test that default IDE controllers are used when there is a free slot
+ # on them
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [disk1, disk2, ide0, ide1]
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ None,
+ devices,
+ 'ide')
+ self.assertEqual(201, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_ide(self):
+ # Test that a new controller is created when there is no free slot on
+ # the default IDE controllers
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [ide0, ide1]
+ for controller_key in [200, 201]:
+ for unit_number in [0, 1]:
+ disk = fake.VirtualDisk(controller_key, unit_number)
+ devices.append(disk)
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'ide')
+ self.assertEqual(-101, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNotNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_scsi(self):
+        # Test that we allocate on an existing SCSI controller when it
+        # still has a free slot
+ devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
+ for unit_number in range(7):
+ disk = fake.VirtualDisk(1000, unit_number)
+ devices.append(disk)
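+        # Units 0-6 are now taken by disks and unit 7 is the controller's
+        # own SCSI unit, so the allocator should return unit 8.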
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'lsiLogic')
+ self.assertEqual(1000, controller_key)
+ self.assertEqual(8, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def _test_get_vnc_config_spec(self, port):
+
+ result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
+ port)
+ return result
+
+ def test_get_vnc_config_spec(self):
+ result = self._test_get_vnc_config_spec(7)
+ expected = """{'extraConfig': [
+ {'value': 'true',
+ 'key': 'RemoteDisplay.vnc.enabled',
+ 'obj_name': 'ns0:OptionValue'},
+ {'value': 7,
+ 'key': 'RemoteDisplay.vnc.port',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def _create_fake_vms(self):
+ fake_vms = fake.FakeRetrieveResult()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ for i in range(10):
+ vm = fake.ManagedObject()
+ opt_val = OptionValue(key='', value=5900 + i)
+ vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ fake_vms.add_object(vm)
+ return fake_vms
+
+ def test_get_vnc_port(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10000, group='vmware')
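+        # The ten fake VMs above hold ports 5900-5909, so the first free
+        # port in the range is 5910.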
+ actual = vm_util.get_vnc_port(
+ fake.FakeObjectRetrievalSession(fake_vms))
+ self.assertEqual(actual, 5910)
+
+ def test_get_vnc_port_exhausted(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10, group='vmware')
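+        # With a range of only 10 ports the fake VMs consume all of them,
+        # so allocation must fail.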
+ self.assertRaises(exception.ConsolePortRangeExhausted,
+ vm_util.get_vnc_port,
+ fake.FakeObjectRetrievalSession(fake_vms))
+
+ def test_get_all_cluster_refs_by_name_none(self):
+ fake_objects = fake.FakeRetrieveResult()
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
+ self.assertEqual({}, refs)
+
+ def test_get_all_cluster_refs_by_name_exists(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual(1, len(refs))
+
+ def test_get_all_cluster_refs_by_name_missing(self):
+ fake_objects = fake.FakeRetrieveResult()
+        fake_objects.add_object(PartialObject(path='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual({}, refs)
+
+ def test_propset_dict_simple(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar")])
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+
+ def test_propset_dict_complex(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+ MoRef = collections.namedtuple('Val', ['value'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar"),
+ DynamicProperty(name='some.thing',
+ val=MoRef(value='else')),
+ DynamicProperty(name='another.thing', val='value')])
+
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+ self.assertTrue(hasattr(propdict['some.thing'], 'value'))
+ self.assertEqual("else", propdict['some.thing'].value)
+ self.assertEqual("value", propdict['another.thing'])
+
+ def _test_detach_virtual_disk_spec(self, destroy_disk=False):
+ virtual_device_config = vm_util.detach_virtual_disk_spec(
+ fake.FakeFactory(),
+ 'fake_device',
+ destroy_disk)
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('fake_device', virtual_device_config.device)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy', virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
+
+ def test_detach_virtual_disk_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=False)
+
+ def test_detach_virtual_disk_destroy_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=True)
+
+ def test_get_vm_create_spec(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [])
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_allocations(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'reservation': 6,
+ 'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_limit(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'high'}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'high',
+ 'shares': 0,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share_custom(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'custom',
+ 'shares': 1948,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_create_vm(self):
+
+ method_list = ['CreateVM_Task', 'get_dynamic_property']
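+        # CreateVM_Task must be invoked first, followed by the task-info
+        # poll via get_dynamic_property; fake_call_method enforces this
+        # ordering.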
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_method = method_list.pop(0)
+ self.assertEqual(expected_method, method)
+ if (expected_method == 'CreateVM_Task'):
+ return 'fake_create_vm_task'
+ elif (expected_method == 'get_dynamic_property'):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+ else:
+ self.fail('Should not get here....')
+
+ def fake_wait_for_task(self, *args):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ fake_call_mock = mock.Mock(side_effect=fake_call_method)
+ fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
+ with contextlib.nested(
+ mock.patch.object(session, '_wait_for_task',
+ fake_wait_mock),
+ mock.patch.object(session, '_call_method',
+ fake_call_mock)
+ ) as (wait_for_task, call_method):
+ vm_ref = vm_util.create_vm(
+ session,
+ fake_instance,
+ 'fake_vm_folder',
+ 'fake_config_spec',
+ 'fake_res_pool_ref')
+ self.assertEqual('fake_vm_ref', vm_ref)
+
+ call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
+ 'fake_vm_folder', config='fake_config_spec',
+ pool='fake_res_pool_ref')
+ wait_for_task.assert_called_once_with('fake_create_vm_task')
+
+ @mock.patch.object(vm_util.LOG, 'warning')
+ def test_create_vm_invalid_guestid(self, mock_log_warn):
+ """Ensure we warn when create_vm() fails after we passed an
+ unrecognised guestId
+ """
+
+ found = [False]
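+        # A one-element list lets the nested fake_log_warn record a match;
+        # 'nonlocal' is not available in Python 2.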
+
+ def fake_log_warn(msg, values):
+ if not isinstance(values, dict):
+ return
+ if values.get('ostype') == 'invalid_os_type':
+ found[0] = True
+ mock_log_warn.side_effect = fake_log_warn
+
+ instance_values = {'id': 7, 'name': 'fake-name',
+ 'uuid': uuidutils.generate_uuid(),
+ 'vcpus': 2, 'memory_mb': 2048}
+ instance = fake_instance.fake_instance_obj(
+ context.RequestContext('fake', 'fake', is_admin=False),
+ **instance_values)
+
+ session = driver.VMwareAPISession()
+
+ config_spec = vm_util.get_vm_create_spec(
+ session.vim.client.factory,
+ instance, instance.name, 'fake-datastore', [],
+ os_type='invalid_os_type')
+
+ self.assertRaises(vexc.VMwareDriverException,
+ vm_util.create_vm, session, instance, 'folder',
+ config_spec, 'res-pool')
+ self.assertTrue(found[0])
+
+ def test_convert_vif_model(self):
+ expected = "VirtualE1000"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
+ self.assertEqual(expected, result)
+ expected = "VirtualE1000e"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
+ self.assertEqual(expected, result)
+ types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
+ "VirtualVmxnet"]
+ for type in types:
+ self.assertEqual(type,
+ vm_util.convert_vif_model(type))
+ self.assertRaises(exception.Invalid,
+ vm_util.convert_vif_model,
+ "InvalidVifModel")
+
+ def test_power_on_instance_with_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_without_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vm_ref",
+ return_value='fake-vm-ref'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance)
+ fake_get_vm_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task",
+ side_effect=exception.NovaException('fake')),
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_on_instance,
+ session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_power_state_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
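+        # An InvalidPowerStateException (e.g. the VM is already powered
+        # on) is expected to be swallowed by power_on_instance rather
+        # than re-raised.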
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(
+ session, "_wait_for_task",
+ side_effect=vexc.InvalidPowerStateException),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_create_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vmdk_create_spec",
+ return_value='fake-spec'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_spec, fake_call_method, fake_wait_for_task):
+ vm_util.create_virtual_disk(session, 'fake-dc-ref',
+ 'fake-adapter-type', 'fake-disk-type',
+ 'fake-path', 7)
+ fake_get_spec.assert_called_once_with(
+ session.vim.client.factory, 7,
+ 'fake-adapter-type',
+ 'fake-disk-type')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CreateVirtualDisk_Task",
+ dm,
+ name='fake-path',
+ datacenter='fake-dc-ref',
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_copy_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.copy_virtual_disk(session, 'fake-dc-ref',
+ 'fake-source', 'fake-dest')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CopyVirtualDisk_Task",
+ dm,
+ sourceName='fake-source',
+ sourceDatacenter='fake-dc-ref',
+ destName='fake-dest')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def _create_fake_vm_objects(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.VirtualMachine())
+ return fake_objects
+
+ def test_get_values(self):
+ objects = self._create_fake_vm_objects()
+ query = vm_util.get_values_from_object_properties(
+ fake.FakeObjectRetrievalSession(objects), objects)
+ self.assertEqual('poweredOn', query['runtime.powerState'])
+ self.assertEqual('guestToolsRunning',
+ query['summary.guest.toolsRunningStatus'])
+ self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
+
+ def test_reconfigure_vm(self):
+ session = fake.FakeSession()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake_reconfigure_task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (_call_method, _wait_for_task):
+ vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
+ _call_method.assert_called_once_with(mock.ANY,
+ 'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
+ _wait_for_task.assert_called_once_with(
+ 'fake_reconfigure_task')
+
+ def test_get_network_attach_config_spec_opaque(self):
+ vif_info = {'network_name': 'br-int',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'OpaqueNetwork',
+ 'network-id': 'fake-network-id',
+ 'network-type': 'opaque'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name':'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {
+ 'macAddress':'00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl':True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name':'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'opaqueNetworkType': 'opaque',
+ 'opaqueNetworkId': 'fake-network-id',
+ 'obj_name': '%(card)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_attach_config_spec_dvs(self):
+ vif_info = {'network_name': 'br100',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'DistributedVirtualPortgroup',
+ 'dvsw': 'fake-network-id',
+ 'dvpg': 'fake-group'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ port = 'ns0:DistributedVirtualSwitchPortConnection'
+ backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {'macAddress': '00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl': True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'port': {
+ 'portgroupKey': 'fake-group',
+ 'switchUuid': 'fake-network-id',
+ 'obj_name': '%(obj_name_port)s'},
+ 'obj_name': '%(obj_name_backing)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
+ 'obj_name_backing': backing,
+ 'obj_name_port': port}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_detach_config_spec(self):
+ result = vm_util.get_network_detach_config_spec(
+ fake.FakeFactory(), 'fake-device', 2)
+ expected = """{
+ 'extraConfig': [{'value': 'free',
+ 'key': 'nvp.iface-id.2',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [{'device': 'fake-device',
+ 'operation': 'remove',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
+ def test_power_off_instance_no_vm_ref(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance)
+ fake_get_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_with_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task',
+ side_effect=exception.NovaException('fake'))
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_off_instance,
+ session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_power_state_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
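+        # An InvalidPowerStateException (e.g. the VM is already powered
+        # off) should be swallowed by power_off_instance rather than
+        # re-raised.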
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(
+ session, '_wait_for_task',
+ side_effect=vexc.InvalidPowerStateException)
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+
+@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
+ # N.B. Mocking on the class only mocks test_*(), but we need
+ # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
+ # setUp causes object initialisation to fail. Not mocking in tests results
+ # in vim calls not using FakeVim.
+ @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ def setUp(self):
+ super(VMwareVMUtilGetHostRefTestCase, self).setUp()
+ fake.reset()
+ vm_util.vm_refs_cache_reset()
+
+ self.session = driver.VMwareAPISession()
+
+ # Create a fake VirtualMachine running on a known host
+ self.host_ref = fake._db_content['HostSystem'].keys()[0]
+ self.vm_ref = fake.create_vm(host_ref=self.host_ref)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_ref_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(self.host_ref, ret)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_name_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ host = fake._get_object(self.host_ref)
+
+ ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(host.name, ret)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
new file mode 100644
index 0000000000..e70f4661b0
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -0,0 +1,1293 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import contextlib
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+
+from nova.compute import power_state
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
+class DsPathMatcher(object):
+ def __init__(self, expected_ds_path_str):
+ self.expected_ds_path_str = expected_ds_path_str
+
+ def __eq__(self, ds_path_param):
+ return str(ds_path_param) == self.expected_ds_path_str
+
+
+class VMwareVMOpsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVMOpsTestCase, self).setUp()
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self.flags(image_cache_subdirectory_name='vmware_base',
+ my_ip='',
+ flat_injected=True,
+ vnc_enabled=True)
+ self._context = context.RequestContext('fake_user', 'fake_project')
+ self._session = driver.VMwareAPISession()
+
+ self._virtapi = mock.Mock()
+ self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None)
+
+ self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ self._instance_values = {
+ 'name': 'fake_name',
+ 'uuid': 'fake_uuid',
+ 'vcpus': 1,
+ 'memory_mb': 512,
+ 'image_ref': self._image_id,
+ 'root_gb': 10,
+ 'node': 'respool-1001(MyResPoolName)',
+ 'expected_attrs': ['system_metadata'],
+ }
+ self._instance = fake_instance.fake_instance_obj(
+ self._context, **self._instance_values)
+
+ fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
+ self._ds = ds_util.Datastore(
+ ref=fake_ds_ref, name='fake_ds',
+ capacity=10 * units.Gi,
+ freespace=10 * units.Gi)
+ self._dc_info = vmops.DcInfo(
+ ref='fake_dc_ref', name='fake_dc',
+ vmFolder='fake_vm_folder')
+
+ subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
+ dns=[network_model.IP('192.168.0.1')],
+ gateway=
+ network_model.IP('192.168.0.1'),
+ ips=[
+ network_model.IP('192.168.0.100')],
+ routes=None)
+ subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
+ dns=None,
+ gateway=
+ network_model.IP('dead:beef::1'),
+ ips=[network_model.IP(
+ 'dead:beef::dcad:beff:feef:0')],
+ routes=None)
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ subnets=[subnet_4, subnet_6],
+ vlan=None,
+ bridge_interface=None,
+ injected=True)
+ self._network_values = {
+ 'id': None,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network': network,
+ 'type': None,
+ 'devname': None,
+ 'ovs_interfaceid': None,
+ 'rxtx_cap': 3
+ }
+ self.network_info = network_model.NetworkInfo([
+ network_model.VIF(**self._network_values)
+ ])
+ pure_IPv6_network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ subnets=[subnet_6],
+ vlan=None,
+ bridge_interface=None,
+ injected=True)
+ self.pure_IPv6_network_info = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=pure_IPv6_network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])
+
+ def test_get_machine_id_str(self):
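+        # The machine id has the form mac;ip;netmask;gateway;broadcast;dns
+        # per VIF, terminated by '#'; a pure IPv6 network leaves the IPv4
+        # fields empty.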
+ result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
+ self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
+ '192.168.0.1;192.168.0.255;192.168.0.1#', result)
+ result = vmops.VMwareVMOps._get_machine_id_str(
+ self.pure_IPv6_network_info)
+ self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
+
+ def _setup_create_folder_mocks(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ base_name = 'folder'
+ ds_name = "datastore"
+ ds_ref = mock.Mock()
+ ds_ref.value = 1
+ dc_ref = mock.Mock()
+ ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
+ ref=dc_ref,
+ name='fake-name',
+ vmFolder='fake-folder')
+ path = ds_util.DatastorePath(ds_name, base_name)
+ return ds_name, ds_ref, ops, path, dc_ref
+
+ @mock.patch.object(ds_util, 'mkdir')
+ def test_create_folder_if_missing(self, mock_mkdir):
+ ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+ ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+ mock_mkdir.assert_called_with(ops._session, path, dc)
+
+ @mock.patch.object(ds_util, 'mkdir')
+ def test_create_folder_if_missing_exception(self, mock_mkdir):
+ ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+ ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
+ ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+ mock_mkdir.assert_called_with(ops._session, path, dc)
+
+ @mock.patch.object(ds_util, 'file_exists', return_value=True)
+ def test_check_if_folder_file_exists_with_existing(self,
+ mock_exists):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ ops._create_folder_if_missing = mock.Mock()
+ mock_ds_ref = mock.Mock()
+ ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+ "folder", "some_file")
+ ops._create_folder_if_missing.assert_called_once_with('datastore',
+ mock_ds_ref,
+ 'vmware_base')
+
+ @mock.patch.object(ds_util, 'file_exists', return_value=False)
+ def test_check_if_folder_file_exists_no_existing(self, mock_exists):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ ops._create_folder_if_missing = mock.Mock()
+ mock_ds_ref = mock.Mock()
+ ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+ "folder", "some_file")
+ ops._create_folder_if_missing.assert_called_once_with('datastore',
+ mock_ds_ref,
+ 'vmware_base')
+
+ def test_get_valid_vms_from_retrieve_result(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ fake_objects = vmwareapi_fake.FakeRetrieveResult()
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+ self.assertEqual(3, len(vms))
+
+ def test_get_valid_vms_from_retrieve_result_with_invalid(self):
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ fake_objects = vmwareapi_fake.FakeRetrieveResult()
+ fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+ invalid_vm1 = vmwareapi_fake.VirtualMachine()
+ invalid_vm1.set('runtime.connectionState', 'orphaned')
+ invalid_vm2 = vmwareapi_fake.VirtualMachine()
+ invalid_vm2.set('runtime.connectionState', 'inaccessible')
+ fake_objects.add_object(invalid_vm1)
+ fake_objects.add_object(invalid_vm2)
+ vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+ self.assertEqual(1, len(vms))
+
+ def test_delete_vm_snapshot(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('RemoveSnapshot_Task', method)
+ self.assertEqual('fake_vm_snapshot', args[0])
+ self.assertFalse(kwargs['removeChildren'])
+ self.assertTrue(kwargs['consolidate'])
+ return 'fake_remove_snapshot_task'
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method', fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self._vmops._delete_vm_snapshot(self._instance,
+ "fake_vm_ref", "fake_vm_snapshot")
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_remove_snapshot_task')])
+
+ def test_create_vm_snapshot(self):
+
+ method_list = ['CreateSnapshot_Task', 'get_dynamic_property']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_method = method_list.pop(0)
+ self.assertEqual(expected_method, method)
+ if (expected_method == 'CreateSnapshot_Task'):
+ self.assertEqual('fake_vm_ref', args[0])
+ self.assertFalse(kwargs['memory'])
+ self.assertTrue(kwargs['quiesce'])
+ return 'fake_snapshot_task'
+ elif (expected_method == 'get_dynamic_property'):
+ task_info = mock.Mock()
+ task_info.result = "fake_snapshot_ref"
+ self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
+ return task_info
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method', fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ snap = self._vmops._create_vm_snapshot(self._instance,
+ "fake_vm_ref")
+ self.assertEqual("fake_snapshot_ref", snap)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_snapshot_task')])
+
+ def test_update_instance_progress(self):
+ instance = objects.Instance(context=mock.MagicMock(), uuid='fake-uuid')
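+        # Completing step 5 of 10 should persist 50% progress.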
+ with mock.patch.object(instance, 'save') as mock_save:
+ self._vmops._update_instance_progress(instance._context,
+ instance, 5, 10)
+ mock_save.assert_called_once_with()
+ self.assertEqual(50, instance.progress)
+
+ @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(driver.VMwareAPISession, '_call_method')
+ def test_get_info(self, mock_call, mock_get_vm_ref):
+ props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+ 'runtime.powerState']
+ prop_cpu = vmwareapi_fake.Prop(props[0], 4)
+ prop_mem = vmwareapi_fake.Prop(props[1], 128)
+ prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
+ prop_list = [prop_state, prop_mem, prop_cpu]
+ obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+ result = vmwareapi_fake.FakeRetrieveResult()
+ result.add_object(obj_content)
+ mock_call.return_value = result
+ info = self._vmops.get_info(self._instance)
+ mock_call.assert_called_once_with(vim_util,
+ 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+ props)
+ mock_get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ self.assertEqual(power_state.RUNNING, info['state'])
+ self.assertEqual(128 * 1024, info['max_mem'])
+ self.assertEqual(128 * 1024, info['mem'])
+ self.assertEqual(4, info['num_cpu'])
+ self.assertEqual(0, info['cpu_time'])
+
+ @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(driver.VMwareAPISession, '_call_method')
+ def test_get_info_when_ds_unavailable(self, mock_call, mock_get_vm_ref):
+ props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+ 'runtime.powerState']
+ prop_state = vmwareapi_fake.Prop(props[2], 'poweredOff')
+        # When the VM's datastore is unavailable, only the power state can
+        # be retrieved.
+ prop_list = [prop_state]
+ obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+ result = vmwareapi_fake.FakeRetrieveResult()
+ result.add_object(obj_content)
+ mock_call.return_value = result
+ info = self._vmops.get_info(self._instance)
+ mock_call.assert_called_once_with(vim_util,
+ 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+ props)
+ mock_get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ self.assertEqual(power_state.SHUTDOWN, info['state'])
+ self.assertEqual(0, info['max_mem'])
+ self.assertEqual(0, info['mem'])
+ self.assertEqual(0, info['num_cpu'])
+ self.assertEqual(0, info['cpu_time'])
+
+ def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
+ instance_ds_ref = mock.Mock()
+ instance_ds_ref.value = "ds-1"
+ _vcvmops = vmops.VMwareVMOps(self._session, None, None)
+ if ds_ref_exists:
+ ds_ref = mock.Mock()
+ ds_ref.value = "ds-1"
+ else:
+ ds_ref = None
+
+ def fake_call_method(module, method, *args, **kwargs):
+ fake_object1 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object1.add_object(vmwareapi_fake.Datacenter(
+ ds_ref=ds_ref))
+ if not ds_ref:
+                # A token is set on fake_object1, so the caller will
+                # continue fetching the next object.
+ setattr(fake_object1, 'token', 'token-0')
+ if method == "continue_to_get_objects":
+ fake_object2 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object2.add_object(vmwareapi_fake.Datacenter())
+ return fake_object2
+
+ return fake_object1
+
+ with mock.patch.object(self._session, '_call_method',
+ side_effect=fake_call_method) as fake_call:
+ dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
+
+ if ds_ref:
+ self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
+ fake_call.assert_called_once_with(vim_util, "get_objects",
+ "Datacenter", ["name", "datastore", "vmFolder"])
+ self.assertEqual("ha-datacenter", dc_info.name)
+ else:
+ calls = [mock.call(vim_util, "get_objects", "Datacenter",
+ ["name", "datastore", "vmFolder"]),
+ mock.call(vim_util, "continue_to_get_objects",
+ "token-0")]
+ fake_call.assert_has_calls(calls)
+ self.assertIsNone(dc_info)
+
+ def test_get_datacenter_ref_and_name(self):
+ self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
+
+ def test_get_datacenter_ref_and_name_with_no_datastore(self):
+ self._test_get_datacenter_ref_and_name()
+
+ def test_unrescue_power_on(self):
+ self._test_unrescue(True)
+
+ def test_unrescue_power_off(self):
+ self._test_unrescue(False)
+
+ def _test_unrescue(self, power_on):
+ self._vmops._volumeops = mock.Mock()
+ vm_rescue_ref = mock.Mock()
+ vm_ref = mock.Mock()
+
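+        # config.hardware.device is looked up first for the original VM
+        # and then for its '-rescue' counterpart.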
+ args_list = [(vm_ref, 'VirtualMachine',
+ 'config.hardware.device'),
+ (vm_rescue_ref, 'VirtualMachine',
+ 'config.hardware.device')]
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_args = args_list.pop(0)
+ self.assertEqual('get_dynamic_property', method)
+ self.assertEqual(expected_args, args)
+
+ path = mock.Mock()
+ path_and_type = (path, mock.Mock(), mock.Mock())
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_vmdk_path_and_adapter_type',
+ return_value=path_and_type),
+ mock.patch.object(vm_util, 'get_vmdk_volume_disk'),
+ mock.patch.object(vm_util, 'power_on_instance'),
+ mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
+ mock.patch.object(vm_util, 'get_vm_ref_from_name',
+ return_value=vm_rescue_ref),
+ mock.patch.object(self._session, '_call_method',
+ fake_call_method),
+ mock.patch.object(vm_util, 'power_off_instance'),
+ mock.patch.object(self._vmops, '_destroy_instance'),
+ ) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk,
+ _power_on_instance, _get_vm_ref, _get_vm_ref_from_name,
+ _call_method, _power_off, _destroy_instance):
+ self._vmops.unrescue(self._instance, power_on=power_on)
+
+ _get_vmdk_path_and_adapter_type.assert_called_once_with(
+ None, uuid='fake_uuid')
+ _get_vmdk_volume_disk.assert_called_once_with(None, path=path)
+ if power_on:
+ _power_on_instance.assert_called_once_with(self._session,
+ self._instance,
+ vm_ref=vm_ref)
+ else:
+ self.assertFalse(_power_on_instance.called)
+ _get_vm_ref.assert_called_once_with(self._session,
+ self._instance)
+ _get_vm_ref_from_name.assert_called_once_with(self._session,
+ 'fake_uuid-rescue')
+ _power_off.assert_called_once_with(self._session, self._instance,
+ vm_rescue_ref)
+ _destroy_instance.assert_called_once_with(self._instance,
+ instance_name='fake_uuid-rescue')
+
+ def _test_finish_migration(self, power_on=True, resize_instance=False):
+ """Tests the finish_migration method on vmops."""
+ if resize_instance:
+ self._instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
+ with contextlib.nested(
+ mock.patch.object(self._session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(self._vmops, "_update_instance_progress"),
+ mock.patch.object(self._session, "_wait_for_task"),
+ mock.patch.object(vm_util, "get_vm_resize_spec",
+ return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self._vmops, '_extend_virtual_disk'),
+ mock.patch.object(vm_util, "power_on_instance")
+ ) as (fake_call_method, fake_update_instance_progress,
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
+ self._vmops.finish_migration(context=self._context,
+ migration=None,
+ instance=self._instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+ if resize_instance:
+ fake_vm_resize_spec.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance)
+ fake_call_method.assert_has_calls(mock.call(
+ self._session.vim,
+ "ReconfigVM_Task",
+ 'f',
+ spec='fake-spec'))
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self._instance, self._instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
+ else:
+ self.assertFalse(fake_vm_resize_spec.called)
+ self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
+
+ if power_on:
+ fake_power_on.assert_called_once_with(self._session,
+ self._instance,
+ vm_ref='f')
+ else:
+ self.assertFalse(fake_power_on.called)
+ fake_update_instance_progress.called_once_with(
+ self._context, self._instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(power_on=True, resize_instance=False)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(power_on=False, resize_instance=False)
+
+ def test_finish_migration_power_on_resize(self):
+ self._test_finish_migration(power_on=True, resize_instance=True)
+
+ @mock.patch.object(vm_util, 'associate_vmref_for_instance')
+ @mock.patch.object(vm_util, 'power_on_instance')
+ def _test_finish_revert_migration(self, fake_power_on,
+ fake_associate_vmref, power_on):
+ """Tests the finish_revert_migration method on vmops."""
+
+ # setup the test instance in the database
+ self._vmops.finish_revert_migration(self._context,
+ instance=self._instance,
+ network_info=None,
+ block_device_info=None,
+ power_on=power_on)
+ fake_associate_vmref.assert_called_once_with(self._session,
+ self._instance,
+ suffix='-orig')
+ if power_on:
+ fake_power_on.assert_called_once_with(self._session,
+ self._instance)
+ else:
+ self.assertFalse(fake_power_on.called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(power_on=True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(power_on=False)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
+ def test_configure_config_drive(self,
+ mock_create_config_drive,
+ mock_attach_cdrom_to_vm):
+ injected_files = mock.Mock()
+ admin_password = mock.Mock()
+ vm_ref = mock.Mock()
+ mock_create_config_drive.return_value = "fake_iso_path"
+ self._vmops._configure_config_drive(
+ self._instance, vm_ref, self._dc_info, self._ds,
+ injected_files, admin_password)
+
+ upload_iso_path = self._ds.build_path("fake_iso_path")
+ mock_create_config_drive.assert_called_once_with(self._instance,
+ injected_files, admin_password, self._ds.name,
+ self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
+ mock_attach_cdrom_to_vm.assert_called_once_with(
+ vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+
+ @mock.patch.object(vmops.LOG, 'debug')
+ @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
+ @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
+ def test_spawn_mask_block_device_info_password(self,
+ mock_build_virtual_machine,
+ mock_get_vm_config_info,
+ mock_debug):
+ # Very simple test that just ensures block_device_info auth_password
+ # is masked when logged; the rest of the test just fails out early.
+ data = {'auth_password': 'scrubme'}
+ bdm = [{'connection_info': {'data': data}}]
+ bdi = {'block_device_mapping': bdm}
+
+ self.password_logged = False
+
+        # fake_debug flags any debug log call that mentions auth_password
+        # and asserts the actual password value has been scrubbed from it.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.password_logged = True
+ self.assertNotIn('scrubme', args[0])
+
+ mock_debug.side_effect = fake_debug
+ self.flags(flat_injected=False, vnc_enabled=False)
+
+ # Call spawn(). We don't care what it does as long as it generates
+ # the log message, which we check below.
+ with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
+ mock_vo.attach_root_volume.side_effect = test.TestingException
+ try:
+ self._vmops.spawn(
+ self._context, self._instance, {},
+ injected_files=None, admin_password=None,
+ network_info=[], block_device_info=bdi
+ )
+ except test.TestingException:
+ pass
+
+        # Check that the relevant log message was generated, which means the
+        # scrubbing assertion in fake_debug actually ran.
+ self.assertTrue(self.password_logged)
+
+ def test_get_ds_browser(self):
+ cache = self._vmops._datastore_browser_mapping
+ ds_browser = mock.Mock()
+ moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
+ self.assertIsNone(cache.get(moref.value))
+ mock_call_method = mock.Mock(return_value=ds_browser)
+ with mock.patch.object(self._session, '_call_method',
+ mock_call_method):
+ ret = self._vmops._get_ds_browser(moref)
+ mock_call_method.assert_called_once_with(vim_util,
+ 'get_dynamic_property', moref, 'Datastore', 'browser')
+ self.assertIs(ds_browser, ret)
+ self.assertIs(ds_browser, cache.get(moref.value))
+
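+    # Linked-clone spawn copies the cached image to a per-size file named
+    # '<image_id>.<root_gb>.vmdk' and extends that copy when the flavor's
+    # root disk is larger than the image, before attaching it to the VM.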
+ @mock.patch.object(
+ vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
+ @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ def _test_use_disk_image_as_linked_clone(self,
+ mock_copy_virtual_disk,
+ mock_extend_virtual_disk,
+ mock_sized_image_exists,
+ flavor_fits_image=False):
+ file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=file_size,
+ linked_clone=False)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ sized_cached_image_ds_loc = cache_root_folder.join(
+ "%s.%s.vmdk" % (self._image_id, vi.root_gb))
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
+
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ str(vi.cache_image_path),
+ str(sized_cached_image_ds_loc))
+
+ if not flavor_fits_image:
+ mock_extend_virtual_disk.assert_called_once_with(
+ self._instance, vi.root_gb * units.Mi,
+ str(sized_cached_image_ds_loc),
+ self._dc_info.ref)
+
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance, vi.ii.adapter_type,
+ vi.ii.disk_type,
+ str(sized_cached_image_ds_loc),
+ vi.root_gb * units.Mi, False)
+
+ def test_use_disk_image_as_linked_clone(self):
+ self._test_use_disk_image_as_linked_clone()
+
+ def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
+ self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ def _test_use_disk_image_as_full_clone(self,
+ mock_copy_virtual_disk,
+ mock_extend_virtual_disk,
+ flavor_fits_image=False):
+ file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=file_size,
+ linked_clone=False)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
+
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ str(vi.cache_image_path),
+ '[fake_ds] fake_uuid/fake_uuid.vmdk')
+
+ if not flavor_fits_image:
+ mock_extend_virtual_disk.assert_called_once_with(
+ self._instance, vi.root_gb * units.Mi,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)
+
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance, vi.ii.adapter_type,
+ vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi, False)
+
+ def test_use_disk_image_as_full_clone(self):
+ self._test_use_disk_image_as_full_clone()
+
+ def test_use_disk_image_as_full_clone_image_too_big(self):
+ self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
+
+ @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ @mock.patch.object(vm_util, 'create_virtual_disk')
+ def _test_use_iso_image(self,
+ mock_create_virtual_disk,
+ mock_attach_cdrom,
+ with_root_disk):
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=10 * units.Mi,
+ linked_clone=True)
+
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+
+ self._vmops._volumeops = mock.Mock()
+ mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+ self._vmops._use_iso_image("fake_vm_ref", vi)
+
+ mock_attach_cdrom.assert_called_once_with(
+ "fake_vm_ref", self._instance, self._ds.ref,
+ str(vi.cache_image_path))
+
+ if with_root_disk:
+ mock_create_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ vi.ii.adapter_type, vi.ii.disk_type,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi)
+ linked_clone = False
+ mock_attach_disk_to_vm.assert_called_once_with(
+ "fake_vm_ref", self._instance,
+ vi.ii.adapter_type, vi.ii.disk_type,
+ '[fake_ds] fake_uuid/fake_uuid.vmdk',
+ vi.root_gb * units.Mi, linked_clone)
+
+ def test_use_iso_image_with_root_disk(self):
+ self._test_use_iso_image(with_root_disk=True)
+
+ def test_use_iso_image_without_root_disk(self):
+ self._test_use_iso_image(with_root_disk=False)
+
+ def _verify_spawn_method_calls(self, mock_call_method):
+ # TODO(vui): More explicit assertions of spawn() behavior
+ # are waiting on additional refactoring pertaining to image
+ # handling/manipulation. Till then, we continue to assert on the
+ # sequence of VIM operations invoked.
+ expected_methods = ['get_dynamic_property',
+ 'SearchDatastore_Task',
+ 'CreateVirtualDisk_Task',
+ 'DeleteDatastoreFile_Task',
+ 'MoveDatastoreFile_Task',
+ 'DeleteDatastoreFile_Task',
+ 'SearchDatastore_Task',
+ 'ExtendVirtualDisk_Task',
+ ]
+
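+        # Each entry in mock_calls is a (name, args, kwargs) triple; args[1]
+        # is the VIM method name passed to _call_method(module, method, ...).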
+ recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
+ self.assertEqual(expected_methods, recorded_methods)
+
+ @mock.patch(
+ 'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
+ @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
+ @mock.patch(
+ 'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance',
+ return_value='fake_node_mo_id')
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_res_pool_ref',
+ return_value='fake_rp_ref')
+ @mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
+ return_value=[])
+ @mock.patch('nova.utils.is_neutron',
+ return_value=False)
+ @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
+ return_value='fake_create_spec')
+ @mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
+ return_value='fake_vm_ref')
+ @mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
+ @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
+ @mock.patch(
+ 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
+ @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
+ @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
+ # TODO(dims): Need to add tests for create_virtual_disk after the
+ # disk/image code in spawn gets refactored
+ def _test_spawn(self,
+ mock_copy_virtual_disk,
+ mock_power_on_instance,
+ mock_get_and_set_vnc_config,
+ mock_enlist_image,
+ mock_set_machine_id,
+ mock_mkdir,
+ mock_create_vm,
+ mock_get_create_spec,
+ mock_is_neutron,
+ mock_get_vif_info,
+ mock_get_res_pool_ref,
+ mock_get_mo_id_for_instance,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ mock_configure_config_drive,
+ block_device_info=None,
+ power_on=True,
+ allocations=None,
+ config_drive=False):
+
+ self._vmops._volumeops = mock.Mock()
+ image = {
+ 'id': 'fake-image-d',
+ 'disk_format': 'vmdk',
+ 'size': 1 * units.Gi,
+ }
+ network_info = mock.Mock()
+ mock_get_datastore.return_value = self._ds
+ mock_get_datacenter_ref_and_name.return_value = self._dc_info
+ mock_call_method = mock.Mock(return_value='fake_task')
+
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method',
+ mock_call_method),
+ mock.patch.object(uuidutils, 'generate_uuid',
+ return_value='tmp-uuid'),
+ mock.patch.object(images, 'fetch_image')
+ ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image):
+ self._vmops.spawn(self._context, self._instance, image,
+ injected_files='fake_files',
+ admin_password='password',
+ network_info=network_info,
+ block_device_info=block_device_info,
+ power_on=power_on)
+
+ mock_is_neutron.assert_called_once_with()
+
+ expected_mkdir_calls = 2
+ if block_device_info and len(block_device_info.get(
+ 'block_device_mapping', [])) > 0:
+            # When block_device_info carries a non-empty
+            # 'block_device_mapping', spawn() skips the image handling
+            # path and never calls ds_util.mkdir().
+ expected_mkdir_calls = 0
+
+ self.assertEqual(expected_mkdir_calls, len(mock_mkdir.mock_calls))
+
+ mock_get_mo_id_for_instance.assert_called_once_with(self._instance)
+ mock_get_res_pool_ref.assert_called_once_with(
+ self._session, None, 'fake_node_mo_id')
+ mock_get_vif_info.assert_called_once_with(
+ self._session, None, False,
+ constants.DEFAULT_VIF_MODEL, network_info)
+ if allocations is None:
+ allocations = {}
+ mock_get_create_spec.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance,
+ 'fake_uuid',
+ 'fake_ds',
+ [],
+ 'otherGuest',
+ allocations=allocations)
+ mock_create_vm.assert_called_once_with(
+ self._session,
+ self._instance,
+ 'fake_vm_folder',
+ 'fake_create_spec',
+ 'fake_rp_ref')
+ mock_get_and_set_vnc_config.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance)
+ mock_set_machine_id.assert_called_once_with(
+ self._session.vim.client.factory,
+ self._instance,
+ network_info)
+ if power_on:
+ mock_power_on_instance.assert_called_once_with(
+ self._session, self._instance, vm_ref='fake_vm_ref')
+ else:
+ self.assertFalse(mock_power_on_instance.called)
+
+ if block_device_info:
+ root_disk = block_device_info['block_device_mapping'][0]
+ mock_attach = self._vmops._volumeops.attach_root_volume
+ mock_attach.assert_called_once_with(
+ root_disk['connection_info'], self._instance, 'vda',
+ self._ds.ref)
+ self.assertFalse(_wait_for_task.called)
+ self.assertFalse(_fetch_image.called)
+ self.assertFalse(_call_method.called)
+ else:
+ mock_enlist_image.assert_called_once_with(
+ self._image_id, self._ds, self._dc_info.ref)
+
+ upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+ self._image_id, self._image_id)
+ _fetch_image.assert_called_once_with(
+ self._context,
+ self._instance,
+ self._session._host,
+ self._dc_info.name,
+ self._ds.name,
+ upload_file_name,
+ cookies='Fake-CookieJar')
+ self.assertTrue(len(_wait_for_task.mock_calls) > 0)
+ self._verify_spawn_method_calls(_call_method)
+
+ dc_ref = 'fake_dc_ref'
+ source_file = unicode('[fake_ds] vmware_base/%s/%s.vmdk' %
+ (self._image_id, self._image_id))
+ dest_file = unicode('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
+ (self._image_id, self._image_id,
+ self._instance['root_gb']))
+ # TODO(dims): add more tests for copy_virtual_disk after
+ # the disk/image code in spawn gets refactored
+ mock_copy_virtual_disk.assert_called_with(self._session,
+ dc_ref,
+ source_file,
+ dest_file)
+ if config_drive:
+ mock_configure_config_drive.assert_called_once_with(
+ self._instance, 'fake_vm_ref', self._dc_info,
+ self._ds, 'fake_files', 'password')
+
+ @mock.patch.object(ds_util, 'get_datastore')
+ @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
+ def _test_get_spawn_vm_config_info(self,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ image_size_bytes=0,
+ instance_name=None):
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=image_size_bytes,
+ linked_clone=True)
+
+ mock_get_datastore.return_value = self._ds
+ mock_get_datacenter_ref_and_name.return_value = self._dc_info
+
+ vi = self._vmops._get_vm_config_info(
+ self._instance, image_info, instance_name=instance_name)
+ self.assertEqual(image_info, vi.ii)
+ self.assertEqual(self._ds, vi.datastore)
+ self.assertEqual(self._instance.root_gb, vi.root_gb)
+ self.assertEqual(self._instance, vi.instance)
+ if instance_name is not None:
+ self.assertEqual(instance_name, vi.instance_name)
+ else:
+ self.assertEqual(self._instance.uuid, vi.instance_name)
+
+ cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ self.assertEqual(cache_image_path, str(vi.cache_image_path))
+
+ cache_image_folder = '[%s] vmware_base/%s' % (
+ self._ds.name, self._image_id)
+ self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
+
+ def test_get_spawn_vm_config_info(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_image_too_big(self):
+ image_size = (self._instance.root_gb + 1) * units.Gi
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._test_get_spawn_vm_config_info,
+ image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_with_instance_name(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(
+ image_size_bytes=image_size,
+ instance_name="foo_instance_name")
+
+ def test_spawn(self):
+ self._test_spawn()
+
+ def test_spawn_config_drive_enabled(self):
+ self.flags(force_config_drive=True)
+ self._test_spawn(config_drive=True)
+
+ def test_spawn_no_power_on(self):
+ self._test_spawn(power_on=False)
+
+ def test_spawn_with_block_device_info(self):
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info)
+
+ def test_spawn_with_block_device_info_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info,
+ config_drive=True)
+
+ def test_build_virtual_machine(self):
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ image = images.VMwareImage(image_id=image_id)
+
+ vm_ref = self._vmops.build_virtual_machine(self._instance,
+ 'fake-instance-name',
+ image, self._dc_info,
+ self._ds, self.network_info)
+
+ vm = vmwareapi_fake._get_object(vm_ref)
+
+ # Test basic VM parameters
+ self.assertEqual('fake-instance-name', vm.name)
+ # NOTE(mdbooth): The instanceUuid behaviour below is apparently
+ # deliberate.
+ self.assertEqual('fake-instance-name',
+ vm.get('summary.config.instanceUuid'))
+ self.assertEqual(self._instance_values['vcpus'],
+ vm.get('summary.config.numCpu'))
+ self.assertEqual(self._instance_values['memory_mb'],
+ vm.get('summary.config.memorySizeMB'))
+
+ # Test NSX config
+ for optval in vm.get('config.extraConfig').OptionValue:
+ if optval.key == 'nvp.vm-uuid':
+ self.assertEqual(self._instance_values['uuid'], optval.value)
+ break
+ else:
+ self.fail('nvp.vm-uuid not found in extraConfig')
+
+ # Test that the VM is associated with the specified datastore
+ datastores = vm.datastore.ManagedObjectReference
+ self.assertEqual(1, len(datastores))
+
+ datastore = vmwareapi_fake._get_object(datastores[0])
+ self.assertEqual(self._ds.name, datastore.get('summary.name'))
+
+ # Test that the VM's network is configured as specified
+ devices = vm.get('config.hardware.device').VirtualDevice
+ for device in devices:
+ if device.obj_name != 'ns0:VirtualE1000':
+ continue
+ self.assertEqual(self._network_values['address'],
+ device.macAddress)
+ break
+ else:
+ self.fail('NIC not configured')
+
+ def test_spawn_cpu_limit(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7})
+
+ def test_spawn_cpu_reservation(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_reservation': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_reservation': 7})
+
+ def test_spawn_cpu_allocations(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7,
+ 'quota:cpu_reservation': 6})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+
+ def test_spawn_cpu_shares_level(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'high'})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'high'})
+
+ def test_spawn_cpu_shares_custom(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'custom',
+ 'quota:cpu_shares_share': 1948})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948})
+
+ def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
+ disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
+ else constants.DEFAULT_DISK_TYPE)
+ file_type = (constants.DISK_FORMAT_ISO if is_iso
+ else constants.DEFAULT_DISK_FORMAT)
+
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=10 * units.Mi,
+ file_type=file_type,
+ disk_type=disk_type,
+ linked_clone=True)
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+ return vi
+
+ @mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
+ @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def _test_fetch_image_if_missing(self,
+ mock_delete_datastore_file,
+ mock_cache_flat_image,
+ mock_cache_sparse_image,
+ mock_cache_iso_image,
+ mock_prepare_flat_image,
+ mock_prepare_sparse_image,
+ mock_prepare_iso_image,
+ mock_fetch_image_as_file,
+ mock_check_cache_folder,
+ is_iso=False,
+ is_sparse_disk=False):
+
+ tmp_dir_path = mock.Mock()
+ tmp_image_path = mock.Mock()
+ if is_iso:
+ mock_prepare = mock_prepare_iso_image
+ mock_cache = mock_cache_iso_image
+ elif is_sparse_disk:
+ mock_prepare = mock_prepare_sparse_image
+ mock_cache = mock_cache_sparse_image
+ else:
+ mock_prepare = mock_prepare_flat_image
+ mock_cache = mock_cache_flat_image
+ mock_prepare.return_value = tmp_dir_path, tmp_image_path
+
+ vi = self._make_vm_config_info(is_iso, is_sparse_disk)
+ self._vmops._fetch_image_if_missing(self._context, vi)
+
+ mock_check_cache_folder.assert_called_once_with(
+ self._ds.name, self._ds.ref)
+ mock_prepare.assert_called_once_with(vi)
+ mock_fetch_image_as_file.assert_called_once_with(
+ self._context, vi, tmp_image_path)
+ mock_cache.assert_called_once_with(vi, tmp_image_path)
+ mock_delete_datastore_file.assert_called_once_with(
+ str(tmp_dir_path), self._dc_info.ref)
+
+ def test_fetch_image_if_missing(self):
+ self._test_fetch_image_if_missing()
+
+ def test_fetch_image_if_missing_with_sparse(self):
+ self._test_fetch_image_if_missing(
+ is_sparse_disk=True)
+
+ def test_fetch_image_if_missing_with_iso(self):
+ self._test_fetch_image_if_missing(
+ is_iso=True)
+
+ @mock.patch.object(images, 'fetch_image')
+ def test_fetch_image_as_file(self, mock_fetch_image):
+ vi = self._make_vm_config_info()
+ image_ds_loc = mock.Mock()
+ self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
+ mock_fetch_image.assert_called_once_with(
+ self._context,
+ vi.instance,
+ self._session._host,
+ self._dc_info.name,
+ self._ds.name,
+ image_ds_loc.rel_path,
+ cookies='Fake-CookieJar')
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_iso_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_sparse_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
+ self._ds.name, self._image_id, "tmp-sparse.vmdk")
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
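+    # _prepare_flat_image is expected to create the image directory and a
+    # placeholder descriptor via create_virtual_disk, then delete the
+    # '-flat.vmdk' data file so the fetched flat image can be written in
+    # its place.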
+ @mock.patch.object(ds_util, 'mkdir')
+ @mock.patch.object(vm_util, 'create_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_flat_image(self,
+ mock_generate_uuid,
+ mock_delete_datastore_file,
+ mock_create_virtual_disk,
+ mock_mkdir):
+ vi = self._make_vm_config_info()
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
+ self._ds.name, self._image_id)
+ expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ mock_mkdir.assert_called_once_with(
+ self._session, DsPathMatcher(expected_image_path_parent),
+ self._dc_info.ref)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ image_info = vi.ii
+ mock_create_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ image_info.adapter_type,
+ image_info.disk_type,
+ DsPathMatcher(expected_path_to_create),
+ image_info.file_size_in_kb)
+ mock_delete_datastore_file.assert_called_once_with(
+ DsPathMatcher(expected_image_path),
+ self._dc_info.ref)
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_iso_image(self, mock_file_move):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_flat_image(self, mock_file_move):
+ vi = self._make_vm_config_info()
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def test_cache_sparse_image(self,
+ mock_delete_datastore_file,
+ mock_copy_virtual_disk,
+ mock_file_move):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+
+ sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
+ self._ds.name, self._image_id)
+ tmp_image_ds_loc = ds_util.DatastorePath.parse(sparse_disk_path)
+
+ self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
+
+ target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
+ self._ds.name,
+ self._image_id, self._image_id)
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ sparse_disk_path,
+ DsPathMatcher(target_disk_path))
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
new file mode 100644
index 0000000000..8dc6b500cb
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -0,0 +1,95 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import volumeops
+
+
+class VMwareVolumeOpsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+
+ super(VMwareVolumeOpsTestCase, self).setUp()
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self._session = driver.VMwareAPISession()
+
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'}
+
+ def _test_detach_disk_from_vm(self, destroy_disk=False):
+ def fake_call_method(module, method, *args, **kwargs):
+ vmdk_detach_config_spec = kwargs.get('spec')
+ virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy',
+ virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config,
+ 'fileOperation'))
+ return 'fake_configure_task'
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ fake_device = vmwareapi_fake.DataObject()
+ fake_device.backing = vmwareapi_fake.DataObject()
+ fake_device.backing.fileName = 'fake_path'
+ fake_device.key = 'fake_key'
+ self._volumeops.detach_disk_from_vm('fake_vm_ref', self.instance,
+ fake_device, destroy_disk)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_configure_task')])
+
+ def test_detach_with_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=True)
+
+ def test_detach_without_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=False)
+
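+    # _get_volume_uuid reads the VM extraConfig option named
+    # 'volume-<uuid>'; the fake below verifies that exact property path
+    # is queried.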
+ def _fake_call_get_dynamic_property(self, uuid, result):
+ def fake_call_method(vim, method, vm_ref, type, prop):
+ expected_prop = 'config.extraConfig["volume-%s"]' % uuid
+ self.assertEqual('VirtualMachine', type)
+ self.assertEqual(expected_prop, prop)
+ return result
+ return fake_call_method
+
+ def test_get_volume_uuid(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
+ fake_call = self._fake_call_get_dynamic_property(uuid, opt_val)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertEqual('volume-val', val)
+
+ def test_get_volume_uuid_not_found(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ fake_call = self._fake_call_get_dynamic_property(uuid, None)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertIsNone(val)
diff --git a/nova/tests/unit/virt/xenapi/__init__.py b/nova/tests/unit/virt/xenapi/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/__init__.py b/nova/tests/unit/virt/xenapi/client/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/test_objects.py b/nova/tests/unit/virt/xenapi/client/test_objects.py
new file mode 100644
index 0000000000..efaf17a9c7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_objects.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi.client import objects
+
+
+class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPISessionObjectTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.obj = objects.XenAPISessionObject(self.session, "FAKE")
+
+ def test_call_method_via_attr(self):
+ self.session.call_xenapi.return_value = "asdf"
+
+ result = self.obj.get_X("ref")
+
+ self.assertEqual(result, "asdf")
+ self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
+
+
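+# Each helper below proxies attribute access into call_xenapi with the
+# XenAPI class name prefixed, e.g. VM(session).get_X("ref") becomes
+# call_xenapi("VM.get_X", "ref").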
+class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ObjectsTestCase, self).setUp()
+ self.session = mock.Mock()
+
+ def test_VM(self):
+ vm = objects.VM(self.session)
+ vm.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_SR(self):
+ sr = objects.SR(self.session)
+ sr.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_VDI(self):
+ vdi = objects.VDI(self.session)
+ vdi.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_VBD(self):
+ vbd = objects.VBD(self.session)
+ vbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_PBD(self):
+ pbd = objects.PBD(self.session)
+ pbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_PIF(self):
+ pif = objects.PIF(self.session)
+ pif.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_VLAN(self):
+ vlan = objects.VLAN(self.session)
+ vlan.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_host(self):
+ host = objects.Host(self.session)
+ host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_network(self):
+ network = objects.Network(self.session)
+ network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_pool(self):
+ pool = objects.Pool(self.session)
+ pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class VBDTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VBDTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.session.VBD = objects.VBD(self.session)
+
+ def test_plug(self):
+ self.session.VBD.plug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
+
+ def test_unplug(self):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.unplug",
+ "vbd_ref")
+
+ @mock.patch.object(utils, 'synchronized')
+ def test_vbd_plug_check_synchronized(self, mock_synchronized):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
diff --git a/nova/tests/unit/virt/xenapi/client/test_session.py b/nova/tests/unit/virt/xenapi/client/test_session.py
new file mode 100644
index 0000000000..1fbbbf752d
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_session.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova import version
+from nova.virt.xenapi.client import session
+
+
+class SessionTestCase(stubs.XenAPITestBaseNoDB):
+ @mock.patch.object(session.XenAPISession, '_create_session')
+ @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
+ @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
+ def test_session_passes_version(self, mock_verify, mock_version,
+ create_session):
+ sess = mock.Mock()
+ create_session.return_value = sess
+ mock_version.return_value = ('version', 'brand')
+
+ session.XenAPISession('url', 'username', 'password')
+
+ expected_version = '%s %s %s' % (version.vendor_string(),
+ version.product_string(),
+ version.version_string_with_package())
+ sess.login_with_password.assert_called_with('username', 'password',
+ expected_version,
+ 'OpenStack')
+
+
+class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ApplySessionHelpersTestCase, self).setUp()
+ self.session = mock.Mock()
+ session.apply_session_helpers(self.session)
+
+ def test_apply_session_helpers_add_VM(self):
+ self.session.VM.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_apply_session_helpers_add_SR(self):
+ self.session.SR.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_apply_session_helpers_add_VDI(self):
+ self.session.VDI.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_apply_session_helpers_add_VBD(self):
+ self.session.VBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PBD(self):
+ self.session.PBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PIF(self):
+ self.session.PIF.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_apply_session_helpers_add_VLAN(self):
+ self.session.VLAN.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_apply_session_helpers_add_host(self):
+ self.session.host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_apply_session_helpers_add_network(self):
+ self.session.network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_apply_session_helpers_add_pool(self):
+ self.session.pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
+ def _get_fake_xapisession(self):
+ class FakeXapiSession(session.XenAPISession):
+ def __init__(self, **kwargs):
+ "Skip the superclass's dirty init"
+ self.XenAPI = mock.MagicMock()
+
+ return FakeXapiSession()
+
+ def setUp(self):
+ super(CallPluginTestCase, self).setUp()
+ self.session = self._get_fake_xapisession()
+
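+    # call_plugin_serialized_with_retry is expected to retry only when the
+    # socket error is a connection reset, invoking retry_cb on each attempt,
+    # and to re-raise any other socket error without retrying.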
+ def test_serialized_with_retry_socket_error_conn_reset(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
+ self.assertEqual(2, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_error_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNREFUSED
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(socket.error,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_once_with(plugin, fn)
+ self.assertEqual(0, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_reset_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
diff --git a/nova/tests/unit/virt/xenapi/image/__init__.py b/nova/tests/unit/virt/xenapi/image/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/image/test_bittorrent.py b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
new file mode 100644
index 0000000000..5422036b98
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
@@ -0,0 +1,163 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import pkg_resources
+import six
+
+from nova import context
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import bittorrent
+from nova.virt.xenapi import vm_utils
+
+
+class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestBittorrentStore, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.mox = mox.Mox()
+
+ self.flags(torrent_base_url='http://foo',
+ connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+
+ def mock_iter_eps(namespace):
+ return []
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)
+
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ def test_download_image(self):
+
+ instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
+ params = {'image_id': 'fake_image_uuid',
+ 'sr_path': '/fake/sr/path',
+ 'torrent_download_stall_cutoff': 600,
+ 'torrent_listen_port_end': 6891,
+ 'torrent_listen_port_start': 6881,
+ 'torrent_max_last_accessed': 86400,
+ 'torrent_max_seeder_processes_per_host': 1,
+ 'torrent_seed_chance': 1.0,
+ 'torrent_seed_duration': 3600,
+ 'torrent_url': 'http://foo/fake_image_uuid.torrent',
+ 'uuid_stack': ['uuid1']}
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self.assertRaises(NotImplementedError, self.store.upload_image,
+ self.context, self.session, mox.IgnoreArg, 'fake_image_uuid',
+ ['fake_vdi_uuid'])
+
+
+def bad_fetcher(image_id):
+ raise test.TestingException("just plain bad.")
+
+
+def another_fetcher(image_id):
+ return "http://www.foobar.com/%s" % image_id
+
+
+class MockEntryPoint(object):
+ name = "torrent_url"
+
+ def load(self):
+ return another_fetcher
+
+
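+# Per the cases below, _lookup_torrent_url_fn prefers a configured
+# torrent_base_url, falls back to a single registered 'torrent_url' entry
+# point, and raises RuntimeError when neither is available or multiple
+# entry points are found.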
+class LookupTorrentURLTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LookupTorrentURLTestCase, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.image_id = 'fakeimageid'
+
+ def _mock_iter_none(self, namespace):
+ return []
+
+ def _mock_iter_single(self, namespace):
+ return [MockEntryPoint()]
+
+ def test_default_fetch_url_no_base_url_set(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_none)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Cannot create default bittorrent URL without'
+ ' torrent_base_url set'
+ ' or torrent URL fetcher extension'),
+ six.text_type(exc))
+
+ def test_default_fetch_url_base_url_is_set(self):
+ self.flags(torrent_base_url='http://foo',
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual('http://foo/fakeimageid.torrent',
+ lookup_fn(self.image_id))
+
+ def test_with_extension(self):
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual("http://www.foobar.com/%s" % self.image_id,
+ lookup_fn(self.image_id))
+
+ def test_multiple_extensions_found(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+
+ def mock_iter_multiple(namespace):
+ return [MockEntryPoint(), MockEntryPoint()]
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_multiple)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
+ ' Failing.'),
+ six.text_type(exc))
diff --git a/nova/tests/unit/virt/xenapi/image/test_glance.py b/nova/tests/unit/virt/xenapi/image/test_glance.py
new file mode 100644
index 0000000000..8fbb853efa
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_glance.py
@@ -0,0 +1,256 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import time
+
+import mock
+from mox3 import mox
+
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import vm_utils
+
+
+class TestGlanceStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestGlanceStore, self).setUp()
+ self.store = glance.GlanceStore()
+
+ self.flags(host='1.1.1.1',
+ port=123,
+ api_insecure=False, group='glance')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ self.instance = {'uuid': 'blah',
+ 'system_metadata': [],
+ 'auto_disk_config': True,
+ 'os_type': 'default',
+ 'xenapi_use_agent': 'true'}
+
+ def _get_params(self):
+ return {'image_id': 'fake_image_uuid',
+ 'glance_host': '1.1.1.1',
+ 'glance_port': 123,
+ 'glance_use_ssl': False,
+ 'sr_path': '/fake/sr/path',
+ 'extra_headers': {'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'foobar',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'project',
+ 'X-User-Id': 'user',
+ 'X-Identity-Status': 'Confirmed'}}
+
+ def _get_download_params(self):
+ params = self._get_params()
+ params['uuid_stack'] = ['uuid1']
+ return params
+
+ def test_download_image(self):
+ params = self._get_download_params()
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
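+    # With num_retries=2 and two api_servers configured, a RetryableError
+    # from the first endpoint should make the store retry the download
+    # against the next server in the list (shuffling is stubbed out here).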
+ @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
+ @mock.patch.object(random, 'shuffle')
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
+ 'debug')
+ def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
+ mock_shuffle, mock_make_uuid_stack):
+ params = self._get_download_params()
+ self.flags(num_retries=2, group='glance')
+
+ params.pop("glance_port")
+ params.pop("glance_host")
+ calls = [mock.call('glance', 'download_vhd', glance_port=9292,
+ glance_host='10.0.1.1', **params),
+ mock.call('glance', 'download_vhd', glance_port=9293,
+ glance_host='10.0.0.1', **params)]
+ log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
+ 'attempts': 3, 'attempt': 1,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'}),
+ mock.call(mock.ANY, {'callback_result': '10.0.0.1',
+ 'attempts': 3, 'attempt': 2,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'})]
+
+ glance_api_servers = ['10.0.1.1:9292',
+ 'http://10.0.0.1:9293']
+ self.flags(api_servers=glance_api_servers, group='glance')
+
+ with (mock.patch.object(self.session, 'call_plugin_serialized')
+ ) as mock_call_plugin_serialized:
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ mock_call_plugin_serialized.side_effect = [error, "success"]
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ mock_call_plugin_serialized.assert_has_calls(calls)
+ mock_log_debug.assert_has_calls(log_calls, any_order=True)
+
+ self.assertEqual(1, mock_fault.call_count)
+
+ def _get_upload_params(self, auto_disk_config=True,
+ expected_os_type='default'):
+ params = self._get_params()
+ params['vdi_uuids'] = ['fake_vdi_uuid']
+ params['properties'] = {'auto_disk_config': auto_disk_config,
+ 'os_type': expected_os_type}
+ return params
+
+ def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
+ params = self._get_upload_params(auto_disk_config, expected_os_type)
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
+
+ self.mox.ReplayAll()
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self._test_upload_image(True)
+
+ def test_upload_image_None_os_type(self):
+ self.instance['os_type'] = None
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_no_os_type(self):
+ del self.instance['os_type']
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_auto_config_disk_disabled(self):
+ sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
+ self.instance["system_metadata"] = sys_meta
+ self._test_upload_image("disabled")
+
+ def test_upload_image_raises_exception(self):
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(RuntimeError)
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_then_raises_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.CouldNotUploadImage,
+ self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_on_signal_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "task signaled", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ # Note(johngarbutt) XenServer 6.1 and later has this error
+ error_details = ["", "signal: SIGTERM", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params)
+ self.mox.ReplayAll()
+
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
diff --git a/nova/tests/unit/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
new file mode 100644
index 0000000000..4763f66683
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_utils.py
@@ -0,0 +1,252 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tarfile
+
+import mock
+
+from nova import test
+from nova.virt.xenapi.image import utils
+
+
+@mock.patch.object(utils, 'IMAGE_API')
+class GlanceImageTestCase(test.NoDBTestCase):
+
+ def _get_image(self):
+ return utils.GlanceImage(mock.sentinel.context,
+ mock.sentinel.image_ref)
+
+ def test_meta(self, mocked):
+ mocked.get.return_value = mock.sentinel.meta
+
+ image = self._get_image()
+ self.assertEqual(mock.sentinel.meta, image.meta)
+ mocked.get.assert_called_once_with(mock.sentinel.context,
+ mock.sentinel.image_ref)
+
+ def test_download_to(self, mocked):
+ mocked.download.return_value = None
+
+ image = self._get_image()
+ result = image.download_to(mock.sentinel.fobj)
+ self.assertIsNone(result)
+ mocked.download.assert_called_once_with(mock.sentinel.context,
+ mock.sentinel.image_ref,
+ mock.sentinel.fobj)
+
+ def test_is_raw_tgz_empty_meta(self, mocked):
+ mocked.get.return_value = {}
+
+ image = self._get_image()
+ self.assertEqual(False, image.is_raw_tgz())
+
+ def test_is_raw_tgz_for_raw_tgz(self, mocked):
+ mocked.get.return_value = {
+ 'disk_format': 'raw',
+ 'container_format': 'tgz'
+ }
+
+ image = self._get_image()
+ self.assertEqual(True, image.is_raw_tgz())
+
+ def test_data(self, mocked):
+ mocked.download.return_value = mock.sentinel.image
+ image = self._get_image()
+
+ self.assertEqual(mock.sentinel.image, image.data())
+
+
+class RawImageTestCase(test.NoDBTestCase):
+ def test_get_size(self):
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ glance_image.meta = {'size': '123'}
+ raw_image = utils.RawImage(glance_image)
+ self.mox.ReplayAll()
+
+ self.assertEqual(123, raw_image.get_size())
+
+ def test_stream_to(self):
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ glance_image.download_to('file').AndReturn('result')
+ raw_image = utils.RawImage(glance_image)
+ self.mox.ReplayAll()
+
+ self.assertEqual('result', raw_image.stream_to('file'))
+
+
+class TestIterableBasedFile(test.NoDBTestCase):
+ def test_constructor(self):
+ class FakeIterable(object):
+ def __iter__(_self):
+ return 'iterator'
+
+ the_file = utils.IterableToFileAdapter(FakeIterable())
+
+ self.assertEqual('iterator', the_file.iterator)
+
+ def test_read_one_character(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('c', the_file.read(1))
+
+ def test_read_stores_remaining_characters(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ the_file.read(1)
+
+ self.assertEqual('hunk1', the_file.remaining_data)
+
+ def test_read_remaining_characters(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('c', the_file.read(1))
+ self.assertEqual('h', the_file.read(1))
+
+ def test_read_reached_end_of_file(self):
+ the_file = utils.IterableToFileAdapter([
+ 'chunk1', 'chunk2'
+ ])
+
+ self.assertEqual('chunk1', the_file.read(100))
+ self.assertEqual('chunk2', the_file.read(100))
+ self.assertEqual('', the_file.read(100))
+
+ def test_empty_chunks(self):
+ the_file = utils.IterableToFileAdapter([
+ '', '', 'chunk2'
+ ])
+
+ self.assertEqual('chunk2', the_file.read(100))
+
+
+class RawTGZTestCase(test.NoDBTestCase):
+ def test_as_tarfile(self):
+ image = utils.RawTGZImage(None)
+ self.mox.StubOutWithMock(image, '_as_file')
+ self.mox.StubOutWithMock(utils.tarfile, 'open')
+
+ image._as_file().AndReturn('the_file')
+ utils.tarfile.open(mode='r|gz', fileobj='the_file').AndReturn('tf')
+
+ self.mox.ReplayAll()
+
+ result = image._as_tarfile()
+ self.assertEqual('tf', result)
+
+ def test_as_file(self):
+ self.mox.StubOutWithMock(utils, 'IterableToFileAdapter')
+ glance_image = self.mox.CreateMock(utils.GlanceImage)
+ image = utils.RawTGZImage(glance_image)
+ glance_image.data().AndReturn('iterable-data')
+ utils.IterableToFileAdapter('iterable-data').AndReturn('data-as-file')
+
+ self.mox.ReplayAll()
+
+ result = image._as_file()
+
+ self.assertEqual('data-as-file', result)
+
+ def test_get_size(self):
+ tar_file = self.mox.CreateMock(tarfile.TarFile)
+ tar_info = self.mox.CreateMock(tarfile.TarInfo)
+
+ image = utils.RawTGZImage(None)
+
+ self.mox.StubOutWithMock(image, '_as_tarfile')
+
+ image._as_tarfile().AndReturn(tar_file)
+ tar_file.next().AndReturn(tar_info)
+ tar_info.size = 124
+
+ self.mox.ReplayAll()
+
+ result = image.get_size()
+
+ self.assertEqual(124, result)
+ self.assertEqual(image._tar_info, tar_info)
+ self.assertEqual(image._tar_file, tar_file)
+
+ def test_get_size_called_twice(self):
+ tar_file = self.mox.CreateMock(tarfile.TarFile)
+ tar_info = self.mox.CreateMock(tarfile.TarInfo)
+
+ image = utils.RawTGZImage(None)
+
+ self.mox.StubOutWithMock(image, '_as_tarfile')
+
+ image._as_tarfile().AndReturn(tar_file)
+ tar_file.next().AndReturn(tar_info)
+ tar_info.size = 124
+
+ self.mox.ReplayAll()
+
+ image.get_size()
+ result = image.get_size()
+
+ self.assertEqual(124, result)
+ self.assertEqual(image._tar_info, tar_info)
+ self.assertEqual(image._tar_file, tar_file)
+
+ def test_stream_to_without_size_retrieved(self):
+ source_tar = self.mox.CreateMock(tarfile.TarFile)
+ first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
+ target_file = self.mox.CreateMock(file)
+ source_file = self.mox.CreateMock(file)
+
+ image = utils.RawTGZImage(None)
+ image._image_service_and_image_id = ('service', 'id')
+
+ self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
+ self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
+
+ image._as_tarfile().AndReturn(source_tar)
+ source_tar.next().AndReturn(first_tarinfo)
+ source_tar.extractfile(first_tarinfo).AndReturn(source_file)
+ utils.shutil.copyfileobj(source_file, target_file)
+ source_tar.close()
+
+ self.mox.ReplayAll()
+
+ image.stream_to(target_file)
+
+ def test_stream_to_with_size_retrieved(self):
+ source_tar = self.mox.CreateMock(tarfile.TarFile)
+ first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
+ target_file = self.mox.CreateMock(file)
+ source_file = self.mox.CreateMock(file)
+ first_tarinfo.size = 124
+
+ image = utils.RawTGZImage(None)
+ image._image_service_and_image_id = ('service', 'id')
+
+ self.mox.StubOutWithMock(image, '_as_tarfile', source_tar)
+ self.mox.StubOutWithMock(utils.shutil, 'copyfileobj')
+
+ image._as_tarfile().AndReturn(source_tar)
+ source_tar.next().AndReturn(first_tarinfo)
+ source_tar.extractfile(first_tarinfo).AndReturn(source_file)
+ utils.shutil.copyfileobj(source_file, target_file)
+ source_tar.close()
+
+ self.mox.ReplayAll()
+
+ image.get_size()
+ image.stream_to(target_file)
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
new file mode 100644
index 0000000000..4a86ce5371
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
@@ -0,0 +1,182 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import tarfile
+
+import eventlet
+
+from nova.image import glance
+from nova import test
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi.image import vdi_through_dev
+
+
+@contextlib.contextmanager
+def fake_context(result=None):
+ yield result
+
+
+class TestDelegatingToCommand(test.NoDBTestCase):
+ def test_upload_image_is_delegated_to_command(self):
+ command = self.mox.CreateMock(vdi_through_dev.UploadToGlanceAsRawTgz)
+ self.mox.StubOutWithMock(vdi_through_dev, 'UploadToGlanceAsRawTgz')
+ vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'ctx', 'session', 'instance', 'image_id', 'vdis').AndReturn(
+ command)
+ command.upload_image().AndReturn('result')
+ self.mox.ReplayAll()
+
+ store = vdi_through_dev.VdiThroughDevStore()
+ result = store.upload_image(
+ 'ctx', 'session', 'instance', 'image_id', 'vdis')
+
+ self.assertEqual('result', result)
+
+
+class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
+ def test_upload_image(self):
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_perform_upload')
+ self.mox.StubOutWithMock(store, '_get_vdi_ref')
+ self.mox.StubOutWithMock(vdi_through_dev, 'glance')
+ self.mox.StubOutWithMock(vdi_through_dev, 'vm_utils')
+ self.mox.StubOutWithMock(vdi_through_dev, 'utils')
+
+ store._get_vdi_ref().AndReturn('vdi_ref')
+ vdi_through_dev.vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=True).AndReturn(
+ fake_context('dev'))
+ vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')
+ vdi_through_dev.utils.temporary_chown('devpath').AndReturn(
+ fake_context())
+ store._perform_upload('devpath')
+
+ self.mox.ReplayAll()
+
+ store.upload_image()
+
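+ # test__perform_upload below exercises the pipe-based design: a
+ # TarGzProducer writes the device contents into one end of an os.pipe()
+ # while an UpdateGlanceImage consumer reads the other end, and both are
+ # spawned on a GreenPool which is then waited on.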
+ def test__perform_upload(self):
+ producer = self.mox.CreateMock(vdi_through_dev.TarGzProducer)
+ consumer = self.mox.CreateMock(glance.UpdateGlanceImage)
+ pool = self.mox.CreateMock(eventlet.GreenPool)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_create_pipe')
+ self.mox.StubOutWithMock(store, '_get_virtual_size')
+ self.mox.StubOutWithMock(producer, 'get_metadata')
+ self.mox.StubOutWithMock(vdi_through_dev, 'TarGzProducer')
+ self.mox.StubOutWithMock(glance, 'UpdateGlanceImage')
+ self.mox.StubOutWithMock(vdi_through_dev, 'eventlet')
+
+ producer.get_metadata().AndReturn('metadata')
+ store._get_virtual_size().AndReturn('324')
+ store._create_pipe().AndReturn(('readfile', 'writefile'))
+ vdi_through_dev.TarGzProducer(
+ 'devpath', 'writefile', '324', 'disk.raw').AndReturn(
+ producer)
+ glance.UpdateGlanceImage('context', 'id', 'metadata',
+ 'readfile').AndReturn(consumer)
+ vdi_through_dev.eventlet.GreenPool().AndReturn(pool)
+ pool.spawn(producer.start)
+ pool.spawn(consumer.start)
+ pool.waitall()
+
+ self.mox.ReplayAll()
+
+ store._perform_upload('devpath')
+
+ def test__get_vdi_ref(self):
+ session = self.mox.CreateMock(xenapi_session.XenAPISession)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
+ session.call_xenapi('VDI.get_by_uuid', 'vdi0').AndReturn('vdi_ref')
+
+ self.mox.ReplayAll()
+
+ self.assertEqual('vdi_ref', store._get_vdi_ref())
+
+ def test__get_virtual_size(self):
+ session = self.mox.CreateMock(xenapi_session.XenAPISession)
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(store, '_get_vdi_ref')
+ store._get_vdi_ref().AndReturn('vdi_ref')
+ session.call_xenapi('VDI.get_virtual_size', 'vdi_ref')
+
+ self.mox.ReplayAll()
+
+ store._get_virtual_size()
+
+ def test__create_pipe(self):
+ store = vdi_through_dev.UploadToGlanceAsRawTgz(
+ 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
+ self.mox.StubOutWithMock(vdi_through_dev, 'os')
+ self.mox.StubOutWithMock(vdi_through_dev, 'greenio')
+ vdi_through_dev.os.pipe().AndReturn(('rpipe', 'wpipe'))
+ vdi_through_dev.greenio.GreenPipe('rpipe', 'rb', 0).AndReturn('rfile')
+ vdi_through_dev.greenio.GreenPipe('wpipe', 'wb', 0).AndReturn('wfile')
+
+ self.mox.ReplayAll()
+
+ result = store._create_pipe()
+ self.assertEqual(('rfile', 'wfile'), result)
+
+
+class TestTarGzProducer(test.NoDBTestCase):
+ def test_constructor(self):
+ producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
+ '100', 'fname')
+
+ self.assertEqual('devpath', producer.fpath)
+ self.assertEqual('writefile', producer.output)
+ self.assertEqual('100', producer.size)
+ self.assertEqual('fname', producer.fname)
+
+ def test_start(self):
+ outf = self.mox.CreateMock(file)
+ producer = vdi_through_dev.TarGzProducer('fpath', outf,
+ '100', 'fname')
+
+ tfile = self.mox.CreateMock(tarfile.TarFile)
+ tinfo = self.mox.CreateMock(tarfile.TarInfo)
+
+ inf = self.mox.CreateMock(file)
+
+ self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
+ self.mox.StubOutWithMock(producer, '_open_file')
+
+ vdi_through_dev.tarfile.TarInfo(name='fname').AndReturn(tinfo)
+ vdi_through_dev.tarfile.open(fileobj=outf, mode='w|gz').AndReturn(
+ fake_context(tfile))
+ producer._open_file('fpath', 'rb').AndReturn(fake_context(inf))
+ tfile.addfile(tinfo, fileobj=inf)
+ outf.close()
+
+ self.mox.ReplayAll()
+
+ producer.start()
+
+ self.assertEqual(100, tinfo.size)
+
+ def test_get_metadata(self):
+ producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
+ '100', 'fname')
+
+ self.assertEqual({
+ 'disk_format': 'raw',
+ 'container_format': 'tgz'},
+ producer.get_metadata())
diff --git a/nova/tests/unit/virt/xenapi/stubs.py b/nova/tests/unit/virt/xenapi/stubs.py
new file mode 100644
index 0000000000..ad13ca41df
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/stubs.py
@@ -0,0 +1,365 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite."""
+
+import pickle
+import random
+
+from oslo.serialization import jsonutils
+
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.xenapi.client import session
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+
+
+def stubout_firewall_driver(stubs, conn):
+
+ def fake_none(self, *args):
+ return
+
+ _vmops = conn._vmops
+ stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
+ stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
+
+
+def stubout_instance_snapshot(stubs):
+ def fake_fetch_image(context, session, instance, name_label, image, type):
+ return {'root': dict(uuid=_make_fake_vdi(), file=None),
+ 'kernel': dict(uuid=_make_fake_vdi(), file=None),
+ 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
+
+ stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ def fake_wait_for_vhd_coalesce(*args):
+ # TODO(sirp): Should we actually fake out the data here?
+ return "fakeparent", "fakebase"
+
+ stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
+
+
+def stubout_session(stubs, cls, product_version=(5, 6, 2),
+ product_brand='XenServer', **opt_args):
+ """Stubs out methods from XenAPISession."""
+ stubs.Set(session.XenAPISession, '_create_session',
+ lambda s, url: cls(url, **opt_args))
+ stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
+ lambda s: (product_version, product_brand))
+
+
+def stubout_get_this_vm_uuid(stubs):
+ def f(session):
+ vms = [rec['uuid'] for ref, rec
+ in fake.get_all_records('VM').iteritems()
+ if rec['is_control_domain']]
+ return vms[0]
+ stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_image_service_download(stubs):
+ def fake_download(*args, **kwargs):
+ pass
+ stubs.Set(nova.tests.unit.image.fake._FakeImageService,
+ 'download', fake_download)
+
+
+def stubout_stream_disk(stubs):
+ def fake_stream_disk(*args, **kwargs):
+ pass
+ stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
+
+
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs stu have PV kernels."""
+
+ def f(*args):
+ return False
+ stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
+
+
+def stubout_is_snapshot(stubs):
+ """Always returns true
+
+ xenapi fake driver does not create vmrefs for snapshots.
+ """
+
+ def f(*args):
+ return True
+ stubs.Set(vm_utils, 'is_snapshot', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_disk_image(stubs, raise_failure=False):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ def _fake_fetch_disk_image(context, session, instance, name_label, image,
+ image_type):
+ if raise_failure:
+ raise fake.Failure("Test Exception raised by "
+ "fake fetch_image_glance_disk")
+ elif image_type == vm_utils.ImageType.KERNEL:
+ filename = "kernel"
+ elif image_type == vm_utils.ImageType.RAMDISK:
+ filename = "ramdisk"
+ else:
+ filename = "unknown"
+
+ vdi_type = vm_utils.ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
+
+ stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake create_vm")
+ stubs.Set(vm_utils, 'create_vm', f)
+
+
+def stubout_attach_disks(stubs):
+ """Simulates a failure in _attach_disks."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake _attach_disks")
+ stubs.Set(vmops.VMOps, '_attach_disks', f)
+
+
+def _make_fake_vdi():
+ sr_ref = fake.get_all('SR')[0]
+ vdi_ref = fake.create_vdi('', sr_ref)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ return vdi_rec['uuid']
+
+
+class FakeSessionForVMTests(fake.SessionBase):
+ """Stubs out a XenAPISession for VM tests."""
+
+ _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
+ "Sun Nov 6 22:49:02 2011\n"
+ "*filter\n"
+ ":INPUT ACCEPT [0:0]\n"
+ ":FORWARD ACCEPT [0:0]\n"
+ ":OUTPUT ACCEPT [0:0]\n"
+ "COMMIT\n"
+ "# Completed on Sun Nov 6 22:49:02 2011\n")
+
+ def host_call_plugin(self, _1, _2, plugin, method, _5):
+ if (plugin, method) == ('glance', 'download_vhd'):
+ root_uuid = _make_fake_vdi()
+ return pickle.dumps(dict(root=dict(uuid=root_uuid)))
+ elif (plugin, method) == ("xenhost", "iptables_config"):
+ return fake.as_json(out=self._fake_iptables_save_output,
+ err='')
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, _5))
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
+ return vm
+
+ def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
+ vm_rec = self.VM_start(_1, vm_ref, _2, _3)
+ vm_rec['resident_on'] = host_ref
+
+ def VDI_snapshot(self, session_ref, vm_ref, _1):
+ sr_ref = "fakesr"
+ return fake.create_vdi('fakelabel', sr_ref, read_only=True)
+
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+
+class FakeSessionForFirewallTests(FakeSessionForVMTests):
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
+
+ def __init__(self, uri, test_case=None):
+ super(FakeSessionForFirewallTests, self).__init__(uri)
+ if hasattr(test_case, '_in_rules'):
+ self._in_rules = test_case._in_rules
+ if hasattr(test_case, '_in6_filter_rules'):
+ self._in6_filter_rules = test_case._in6_filter_rules
+ self._test_case = test_case
+
+ def host_call_plugin(self, _1, _2, plugin, method, args):
+ """Mock method four host_call_plugin to be used in unit tests
+ for the dom0 iptables Firewall drivers for XenAPI
+
+ """
+ if plugin == "xenhost" and method == "iptables_config":
+ # The command to execute is a json-encoded list
+ cmd_args = args.get('cmd_args', None)
+ cmd = jsonutils.loads(cmd_args)
+ if not cmd:
+ ret_str = ''
+ else:
+ output = ''
+ process_input = args.get('process_input', None)
+ if cmd == ['ip6tables-save', '-c']:
+ output = '\n'.join(self._in6_filter_rules)
+ if cmd == ['iptables-save', '-c']:
+ output = '\n'.join(self._in_rules)
+ if cmd == ['iptables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ if self._test_case is not None:
+ self._test_case._out_rules = lines
+ output = '\n'.join(lines)
+ if cmd == ['ip6tables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ output = '\n'.join(lines)
+ ret_str = fake.as_json(out=output, err='')
+ return ret_str
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, args))
+
+
+def stub_out_vm_methods(stubs):
+ def fake_acquire_bootlock(self, vm):
+ pass
+
+ def fake_release_bootlock(self, vm):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ def fake_wait_for_device(dev):
+ pass
+
+ stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
+ stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+ stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
+
+
+class FakeSessionForVolumeTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Volume tests."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ valid_vdi = False
+ refs = fake.get_all('VDI')
+ for ref in refs:
+ rec = fake.get_record('VDI', ref)
+ if rec['uuid'] == uuid:
+ valid_vdi = True
+ if not valid_vdi:
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+
+class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ # This is for testing failure
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+ def PBD_unplug(self, _1, ref):
+ rec = fake.get_record('PBD', ref)
+ rec['currently-attached'] = False
+
+ def SR_forget(self, _1, ref):
+ pass
+
+
+def stub_out_migration_methods(stubs):
+ fakesr = fake.create_sr()
+
+ def fake_import_all_migrated_disks(session, instance):
+ vdi_ref = fake.create_vdi(instance['name'], fakesr)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ vdi_rec['other_config']['nova_disk_type'] = 'root'
+ return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
+ "ephemerals": {}}
+
+ def fake_wait_for_instance_to_start(self, *args):
+ pass
+
+ def fake_get_vdi(session, vm_ref, userdevice='0'):
+ vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
+ vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
+ vdi_ref = fake.create_vdi('derp', fakesr,
+ sm_config={'vhd-parent': vdi_rec_parent['uuid']})
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ return vdi_ref, vdi_rec
+
+ def fake_sr(session, *args):
+ return fakesr
+
+ def fake_get_sr_path(*args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
+ fake_wait_for_instance_to_start)
+ stubs.Set(vm_utils, 'import_all_migrated_disks',
+ fake_import_all_migrated_disks)
+ stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+
+class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
+ def VM_assert_can_migrate(self, session, vmref, migrate_data,
+ live, vdi_map, vif_map, options):
+ raise fake.Failure("XenAPI VM.assert_can_migrate failed")
+
+ def host_migrate_receive(self, session, hostref, networkref, options):
+ raise fake.Failure("XenAPI host.migrate_receive failed")
+
+ def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
+ vif_map, options):
+ raise fake.Failure("XenAPI VM.migrate_send failed")
+
+
+# FIXME(sirp): XenAPITestBase is deprecated; all tests should be converted
+# over to use XenAPITestBaseNoDB.
+class XenAPITestBase(test.TestCase):
+ def setUp(self):
+ super(XenAPITestBase, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
+
+
+class XenAPITestBaseNoDB(test.NoDBTestCase):
+ def setUp(self):
+ super(XenAPITestBaseNoDB, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
diff --git a/nova/tests/unit/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py
new file mode 100644
index 0000000000..5004b381d4
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_agent.py
@@ -0,0 +1,468 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import time
+import uuid
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.virt.xenapi import agent
+from nova.virt.xenapi import fake as xenapi_fake
+
+
+def _get_fake_instance(**kwargs):
+ system_metadata = []
+ for k, v in kwargs.items():
+ system_metadata.append({
+ "key": k,
+ "value": v
+ })
+
+ return {
+ "system_metadata": system_metadata,
+ "uuid": "uuid",
+ "key_data": "ssh-rsa asdf",
+ "os_type": "asdf",
+ }
+
+
+class AgentTestCaseBase(test.NoDBTestCase):
+ def _create_agent(self, instance, session="session"):
+ self.session = session
+ self.virtapi = "virtapi"
+ self.vm_ref = "vm_ref"
+ return agent.XenAPIBasedAgent(self.session, self.virtapi,
+ instance, self.vm_ref)
+
+
+class AgentImageFlagsTestCase(AgentTestCaseBase):
+ def test_agent_is_present(self):
+ self.flags(use_agent_default=False, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "true"}]}
+ self.assertTrue(agent.should_use_agent(instance))
+
+ def test_agent_is_disabled(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "false"}]}
+ self.assertFalse(agent.should_use_agent(instance))
+
+ def test_agent_uses_default_when_prop_invalid(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata":
+ [{"key": "image_xenapi_use_agent", "value": "bob"}],
+ "uuid": "uuid"}
+ self.assertTrue(agent.should_use_agent(instance))
+
+ def test_agent_default_not_present(self):
+ self.flags(use_agent_default=False, group='xenserver')
+ instance = {"system_metadata": []}
+ self.assertFalse(agent.should_use_agent(instance))
+
+ def test_agent_default_present(self):
+ self.flags(use_agent_default=True, group='xenserver')
+ instance = {"system_metadata": []}
+ self.assertTrue(agent.should_use_agent(instance))
+
+
+class SysMetaKeyTestBase():
+ key = None
+
+ def _create_agent_with_value(self, value):
+ kwargs = {self.key: value}
+ instance = _get_fake_instance(**kwargs)
+ return self._create_agent(instance)
+
+ def test_get_sys_meta_key_true(self):
+ agent = self._create_agent_with_value("true")
+ self.assertTrue(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_false(self):
+ agent = self._create_agent_with_value("False")
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_invalid_is_false(self):
+ agent = self._create_agent_with_value("invalid")
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+ def test_get_sys_meta_key_missing_is_false(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.assertFalse(agent._get_sys_meta_key(self.key))
+
+
+class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
+ key = "image_xenapi_skip_agent_inject_ssh"
+
+ def test_skip_ssh_key_inject(self):
+ agent = self._create_agent_with_value("True")
+ self.assertTrue(agent._skip_ssh_key_inject())
+
+
+class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
+ key = "image_xenapi_skip_agent_inject_files_at_boot"
+
+ def test_skip_inject_files_at_boot(self):
+ agent = self._create_agent_with_value("True")
+ self.assertTrue(agent._skip_inject_files_at_boot())
+
+
+class InjectSshTestCase(AgentTestCaseBase):
+ def test_inject_ssh_key_succeeds(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ agent.inject_file("/root/.ssh/authorized_keys",
+ "\n# The following ssh key was injected by Nova"
+ "\nssh-rsa asdf\n")
+
+ self.mox.ReplayAll()
+ agent.inject_ssh_key()
+
+ def _test_inject_ssh_key_skipped(self, instance):
+ agent = self._create_agent(instance)
+
+ # make sure it's not called
+ self.mox.StubOutWithMock(agent, "inject_file")
+ self.mox.ReplayAll()
+
+ agent.inject_ssh_key()
+
+ def test_inject_ssh_key_skipped_no_key_data(self):
+ instance = _get_fake_instance()
+ instance["key_data"] = None
+ self._test_inject_ssh_key_skipped(instance)
+
+ def test_inject_ssh_key_skipped_windows(self):
+ instance = _get_fake_instance()
+ instance["os_type"] = "windows"
+ self._test_inject_ssh_key_skipped(instance)
+
+ def test_inject_ssh_key_skipped_cloud_init_present(self):
+ instance = _get_fake_instance(
+ image_xenapi_skip_agent_inject_ssh="True")
+ self._test_inject_ssh_key_skipped(instance)
+
+
+class FileInjectionTestCase(AgentTestCaseBase):
+ def test_inject_file(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "_call_agent")
+
+ b64_path = base64.b64encode('path')
+ b64_contents = base64.b64encode('contents')
+ agent._call_agent('inject_file',
+ {'b64_contents': b64_contents,
+ 'b64_path': b64_path})
+
+ self.mox.ReplayAll()
+
+ agent.inject_file("path", "contents")
+
+ def test_inject_files(self):
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ files = [("path1", "content1"), ("path2", "content2")]
+ agent.inject_file(*files[0])
+ agent.inject_file(*files[1])
+
+ self.mox.ReplayAll()
+
+ agent.inject_files(files)
+
+ def test_inject_files_skipped_when_cloud_init_installed(self):
+ instance = _get_fake_instance(
+ image_xenapi_skip_agent_inject_files_at_boot="True")
+ agent = self._create_agent(instance)
+ self.mox.StubOutWithMock(agent, "inject_file")
+
+ files = [("path1", "content1"), ("path2", "content2")]
+
+ self.mox.ReplayAll()
+
+ agent.inject_files(files)
+
+
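+# The retry tests below rely on the agent treating a XenAPI failure whose
+# details contain "REBOOT:" as a guest reboot, retrying the call once a new
+# dom id is seen; this fake failure drives that path.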
+class FakeRebootException(Exception):
+ details = ["", "", "", "asdf REBOOT: asdf"]
+
+
+class RebootRetryTestCase(AgentTestCaseBase):
+ @mock.patch.object(agent, '_wait_for_new_dom_id')
+ def test_retry_on_reboot(self, mock_wait):
+ mock_session = mock.Mock()
+
+ def fake_call_plugin(*args, **kwargs):
+ if fake_call_plugin.called:
+ return {"returncode": '0', "message": "done"}
+ else:
+ fake_call_plugin.called = True
+ raise FakeRebootException()
+
+ fake_call_plugin.called = False
+ mock_session.XenAPI.Failure = FakeRebootException
+ mock_session.VM.get_domid.return_value = "fake_dom_id"
+ mock_session.call_plugin.side_effect = fake_call_plugin
+
+ agent = self._create_agent(None, mock_session)
+
+ result = agent._call_agent("asdf")
+ self.assertEqual("done", result)
+ self.assertTrue(mock_session.VM.get_domid.called)
+ self.assertEqual(2, mock_session.call_plugin.call_count)
+ mock_wait.assert_called_once_with(mock_session, self.vm_ref,
+ "fake_dom_id", "asdf")
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep):
+ mock_session = mock.Mock()
+ mock_session.VM.get_domid.return_value = "new"
+
+ agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method")
+
+ mock_session.VM.get_domid.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_sleep.called)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep):
+ self.flags(agent_timeout=3, group="xenserver")
+ mock_time.return_value = 0
+ mock_session = mock.Mock()
+ old = 40
+ new = 42
+ mock_session.VM.get_domid.side_effect = [old, -1, new]
+
+ agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method")
+
+ mock_session.VM.get_domid.assert_called_with("vm_ref")
+ self.assertEqual(3, mock_session.VM.get_domid.call_count)
+ self.assertEqual(2, mock_sleep.call_count)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep):
+ self.flags(agent_timeout=3, group="xenserver")
+
+ def fake_time():
+ fake_time.time = fake_time.time + 1
+ return fake_time.time
+
+ fake_time.time = 0
+ mock_time.side_effect = fake_time
+ mock_session = mock.Mock()
+ mock_session.VM.get_domid.return_value = "old"
+
+ self.assertRaises(exception.AgentTimeout,
+ agent._wait_for_new_dom_id,
+ mock_session, "vm_ref", "old", "method")
+
+ self.assertEqual(4, mock_session.VM.get_domid.call_count)
+
+
+class SetAdminPasswordTestCase(AgentTestCaseBase):
+ @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
+ @mock.patch("nova.virt.xenapi.agent.SimpleDH")
+ def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent):
+ agent = self._create_agent(None)
+ instance_mock = mock_simple_dh()
+ instance_mock.get_public.return_value = 4321
+ mock_call_agent.return_value = "1234"
+
+ result = agent._exchange_key_with_agent()
+
+ mock_call_agent.assert_called_once_with('key_init', {"pub": "4321"},
+ success_codes=['D0'],
+ ignore_errors=False)
+ result.compute_shared.assert_called_once_with(1234)
+
+ @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
+ @mock.patch.object(agent.XenAPIBasedAgent,
+ '_save_instance_password_if_sshkey_present')
+ @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
+ def test_set_admin_password_works(self, mock_exchange, mock_save,
+ mock_call_agent):
+ mock_dh = mock.Mock(spec_set=agent.SimpleDH)
+ mock_dh.encrypt.return_value = "enc_pass"
+ mock_exchange.return_value = mock_dh
+ agent_inst = self._create_agent(None)
+
+ agent_inst.set_admin_password("new_pass")
+
+ mock_dh.encrypt.assert_called_once_with("new_pass\n")
+ mock_call_agent.assert_called_once_with('password',
+ {'enc_pass': 'enc_pass'})
+ mock_save.assert_called_once_with("new_pass")
+
+ @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault')
+ @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
+ def test_set_admin_password_silently_fails(self, mock_exchange,
+ mock_add_fault):
+ error = exception.AgentTimeout(method="fake")
+ mock_exchange.side_effect = error
+ agent_inst = self._create_agent(None)
+
+ agent_inst.set_admin_password("new_pass")
+
+ mock_add_fault.assert_called_once_with(error, mock.ANY)
+
+
+class UpgradeRequiredTestCase(test.NoDBTestCase):
+ def test_less_than(self):
+ self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5'))
+
+ def test_greater_than(self):
+ self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4'))
+
+ def test_equal(self):
+ self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4'))
+
+ def test_non_lexical(self):
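+ # Version components compare numerically, so '1.2.3.10' is newer than
+ # '1.2.3.4' even though it sorts lower as a plain string.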
+ self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4'))
+
+ def test_length(self):
+ self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4'))
+
+
+@mock.patch.object(uuid, "uuid4")
+class CallAgentTestCase(AgentTestCaseBase):
+ def test_call_agent_success(self, mock_uuid):
+ session = mock.Mock()
+ instance = {"uuid": "fake"}
+ addl_args = {"foo": "bar"}
+
+ session.VM.get_domid.return_value = '42'
+ mock_uuid.return_value = 1
+ session.call_plugin.return_value = {'returncode': '4',
+ 'message': "asdf\\r\\n"}
+
+ self.assertEqual("asdf",
+ agent._call_agent(session, instance, "vm_ref",
+ "method", addl_args, timeout=300,
+ success_codes=['0', '4']))
+
+ expected_args = {
+ 'id': '1',
+ 'dom_id': '42',
+ 'timeout': '300',
+ }
+ expected_args.update(addl_args)
+ session.VM.get_domid.assert_called_once_with("vm_ref")
+ session.call_plugin.assert_called_once_with("agent", "method",
+ expected_args)
+
+ def _call_agent_setup(self, session, mock_uuid,
+ returncode='0', success_codes=None,
+ exception=None):
+ session.XenAPI.Failure = xenapi_fake.Failure
+ instance = {"uuid": "fake"}
+
+ session.VM.get_domid.return_value = 42
+ mock_uuid.return_value = 1
+ if exception:
+ session.call_plugin.side_effect = exception
+ else:
+ session.call_plugin.return_value = {'returncode': returncode,
+ 'message': "asdf\\r\\n"}
+
+ return agent._call_agent(session, instance, "vm_ref", "method",
+ success_codes=success_codes)
+
+ def _assert_agent_called(self, session, mock_uuid):
+ expected_args = {
+ 'id': '1',
+ 'dom_id': '42',
+ 'timeout': '30',
+ }
+ session.call_plugin.assert_called_once_with("agent", "method",
+ expected_args)
+ session.VM.get_domid.assert_called_once_with("vm_ref")
+
+ def test_call_agent_works_with_defaults(self, mock_uuid):
+ session = mock.Mock()
+ self._call_agent_setup(session, mock_uuid)
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_timeout(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentTimeout, self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["TIMEOUT:fake"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_not_implemented(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentNotImplemented,
+ self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["NOT IMPLEMENTED:"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_other_error(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentError, self._call_agent_setup,
+ session, mock_uuid,
+ exception=xenapi_fake.Failure(["asdf"]))
+ self._assert_agent_called(session, mock_uuid)
+
+ def test_call_agent_fails_with_returned_error(self, mock_uuid):
+ session = mock.Mock()
+ self.assertRaises(exception.AgentError, self._call_agent_setup,
+ session, mock_uuid, returncode='42')
+ self._assert_agent_called(session, mock_uuid)
+
+
+class XenAPIBasedAgent(AgentTestCaseBase):
+ @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
+ @mock.patch.object(agent, "_call_agent")
+ def test_call_agent_swallows_error(self, mock_call_agent,
+ mock_add_instance_fault):
+ fake_error = exception.AgentError(method="bob")
+ mock_call_agent.side_effect = fake_error
+
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+
+ agent._call_agent("bob")
+
+ mock_call_agent.assert_called_once_with(agent.session, agent.instance,
+ agent.vm_ref, "bob", None, None, None)
+ mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY)
+
+ @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
+ @mock.patch.object(agent, "_call_agent")
+ def test_call_agent_throws_error(self, mock_call_agent,
+ mock_add_instance_fault):
+ fake_error = exception.AgentError(method="bob")
+ mock_call_agent.side_effect = fake_error
+
+ instance = _get_fake_instance()
+ agent = self._create_agent(instance)
+
+ self.assertRaises(exception.AgentError, agent._call_agent,
+ "bob", ignore_errors=False)
+
+ mock_call_agent.assert_called_once_with(agent.session, agent.instance,
+ agent.vm_ref, "bob", None, None, None)
+ self.assertFalse(mock_add_instance_fault.called)
diff --git a/nova/tests/unit/virt/xenapi/test_driver.py b/nova/tests/unit/virt/xenapi/test_driver.py
new file mode 100644
index 0000000000..eb3e02f29e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_driver.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import math
+
+import mock
+from oslo.utils import units
+
+from nova.compute import arch
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import xenapi
+from nova.virt.xenapi import driver as xenapi_driver
+
+
+class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Driver operations."""
+
+ def _get_driver(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass', group='xenserver')
+ return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def host_stats(self, refresh=True):
+ return {'host_memory_total': 3 * units.Mi,
+ 'host_memory_free_computed': 2 * units.Mi,
+ 'disk_total': 5 * units.Gi,
+ 'disk_used': 2 * units.Gi,
+ 'disk_allocated': 4 * units.Gi,
+ 'host_hostname': 'somename',
+ 'supported_instances': arch.X86_64,
+ 'host_cpu_info': {'cpu_count': 50},
+ 'vcpus_used': 10,
+ 'pci_passthrough_devices': ''}
+
+ def test_available_resource(self):
+ driver = self._get_driver()
+ driver._session.product_version = (6, 8, 2)
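+ # The version tuple (6, 8, 2) maps to 6008002 below: each component is
+ # packed base-1000 (major * 10^6 + minor * 10^3 + micro).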
+
+ self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
+
+ resources = driver.get_available_resource(None)
+ self.assertEqual(6008002, resources['hypervisor_version'])
+ self.assertEqual(50, resources['vcpus'])
+ self.assertEqual(3, resources['memory_mb'])
+ self.assertEqual(5, resources['local_gb'])
+ self.assertEqual(10, resources['vcpus_used'])
+ self.assertEqual(3 - 2, resources['memory_mb_used'])
+ self.assertEqual(2, resources['local_gb_used'])
+ self.assertEqual('xen', resources['hypervisor_type'])
+ self.assertEqual('somename', resources['hypervisor_hostname'])
+ self.assertEqual(1, resources['disk_available_least'])
+
+ def test_overhead(self):
+ driver = self._get_driver()
+ instance = {'memory_mb': 30720, 'vcpus': 4}
+
+ # expected memory overhead per:
+ # https://wiki.openstack.org/wiki/XenServer/Overhead
+ expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
+ (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
+ xenapi_driver.OVERHEAD_BASE)
+ expected = math.ceil(expected)
+ overhead = driver.estimate_instance_overhead(instance)
+ self.assertEqual(expected, overhead['memory_mb'])
+
+ def test_set_bootable(self):
+ driver = self._get_driver()
+
+ self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
+ driver._vmops.set_bootable('inst', True)
+ self.mox.ReplayAll()
+
+ driver.set_bootable('inst', True)
+
+ def test_post_interrupted_snapshot_cleanup(self):
+ driver = self._get_driver()
+ fake_vmops_cleanup = mock.Mock()
+ driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
+
+ driver.post_interrupted_snapshot_cleanup("context", "instance")
+
+ fake_vmops_cleanup.assert_called_once_with("context", "instance")
+
+ def test_public_api_signatures(self):
+ inst = self._get_driver()
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
diff --git a/nova/tests/unit/virt/xenapi/test_network_utils.py b/nova/tests/unit/virt/xenapi/test_network_utils.py
new file mode 100644
index 0000000000..5aa660f2a7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_network_utils.py
@@ -0,0 +1,76 @@
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import network_utils
+
+
+class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_network_with_name_label_works(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net"]
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertEqual("net", result)
+ session.network.get_by_name_label.assert_called_once_with("label")
+
+ def test_find_network_with_name_returns_none(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = []
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertIsNone(result)
+
+ def test_find_network_with_name_label_raises(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net", "net2"]
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_name_label,
+ session, "label")
+
+ def test_find_network_with_bridge_works(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {"net": "asdf"}
+
+ result = network_utils.find_network_with_bridge(session, "bridge")
+
+ self.assertEqual(result, "net")
+ expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"'
+ session.network.get_all_records_where.assert_called_once_with(expr)
+
+ def test_find_network_with_bridge_raises_too_many(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {
+ "net": "asdf",
+ "net2": "asdf2"
+ }
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
+
+ def test_find_network_with_bridge_raises_no_networks(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {}
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py
new file mode 100644
index 0000000000..ac54bd1480
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,2422 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from eventlet import greenthread
+import fixtures
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import vm_mode
+from nova import context
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common.fixture import config as config_fixture
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.tests.unit.virt.xenapi import test_xenapi
+from nova import utils
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
+def get_fake_connection_data(sr_type):
+ fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
+ 'name_label': 'fake_storage',
+ 'name_description': 'test purposes',
+ 'server': 'myserver',
+ 'serverpath': '/local/scratch/myname',
+ 'sr_type': 'nfs',
+ 'introduce_sr_keys': ['server',
+ 'serverpath',
+ 'sr_type'],
+ 'vdi_uuid': 'falseVDI'},
+ ISCSI_TYPE: {'volume_id': 'fake_volume_id',
+ 'target_lun': 1,
+ 'target_iqn': 'fake_iqn:volume-fake_volume_id',
+ 'target_portal': u'localhost:3260',
+ 'target_discovered': False}, }
+ return fakes[sr_type]
+
+
+def _get_fake_session(error=None):
+ session = mock.Mock()
+ xenapi_session.apply_session_helpers(session)
+
+ if error is not None:
+ class FakeException(Exception):
+ details = [error, "a", "b", "c"]
+
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+
+ return session
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
+ pass
+
+
+class LookupTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(LookupTestCase, self).setUp()
+ self.session = self.mox.CreateMockAnything('Fake Session')
+ self.name_label = 'my_vm'
+
+ def _do_mock(self, result):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label).AndReturn(result)
+ self.mox.ReplayAll()
+
+ def test_normal(self):
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertEqual('x', result)
+
+ def test_no_result(self):
+ self._do_mock([])
+ result = vm_utils.lookup(self.session, self.name_label)
+ self.assertIsNone(result)
+
+ def test_too_many(self):
+ self._do_mock(['a', 'b'])
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label)
+
+ def test_rescue_none(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
+ self._do_mock(['x'])
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('x', result)
+
+ def test_rescue_found(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['y'])
+ self.mox.ReplayAll()
+ result = vm_utils.lookup(self.session, self.name_label,
+ check_rescue=True)
+ self.assertEqual('y', result)
+
+ def test_rescue_too_many(self):
+ self.session.call_xenapi(
+ "VM.get_by_name_label",
+ self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstanceExists,
+ vm_utils.lookup,
+ self.session, self.name_label,
+ check_rescue=True)
+
+
+class GenerateConfigDriveTestCase(VMUtilsTestBase):
+ def test_no_admin_pass(self):
+ instance = {}
+
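+ # Expected flow: find the SR, create a 64 MiB 'config-2' VDI, attach it,
+ # build the ISO with genisoimage, dd it onto the attached device, then
+ # plug the VDI into the VM as a read-only VBD.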
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * units.Mi).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(_self, instance, content=None, extra_md=None,
+ network_info=None):
+ self.assertEqual(network_info, "nw_info")
+
+ def metadata_for_config_drive(_self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice', "nw_info")
+
+ @mock.patch.object(vm_utils, "destroy_vdi")
+ @mock.patch.object(vm_utils, "vdi_attached_here")
+ @mock.patch.object(vm_utils, "create_vdi")
+ @mock.patch.object(vm_utils, "safe_find_sr")
+ def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
+ mock_destroy):
+ mock_create_vdi.return_value = 'vdi_ref'
+ mock_attached.side_effect = test.TestingException
+ mock_destroy.side_effect = exception.StorageError(reason="")
+
+ instance = {"uuid": "asdf"}
+ self.assertRaises(test.TestingException,
+ vm_utils.generate_configdrive,
+ 'session', instance, 'vm_ref', 'userdevice',
+ 'nw_info')
+ mock_destroy.assert_called_once_with('session', 'vdi_ref')
+
+
+class XenAPIGetUUID(VMUtilsTestBase):
+ def test_get_this_vm_uuid_new_kernel(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+
+ vm_utils._get_sys_hypervisor_uuid().AndReturn(
+ '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+ def test_get_this_vm_uuid_old_kernel_reboot(self):
+ self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ vm_utils._get_sys_hypervisor_uuid().AndRaise(
+ IOError(13, 'Permission denied'))
+ utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
+ ('27', ''))
+ utils.execute('xenstore-read', '/local/domain/27/vm',
+ run_as_root=True).AndReturn(
+ ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
+
+ self.mox.ReplayAll()
+ self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
+ vm_utils.get_this_vm_uuid(None))
+ self.mox.VerifyAll()
+
+
+class FakeSession(object):
+ def call_xenapi(self, *args):
+ pass
+
+ def call_plugin(self, *args):
+ pass
+
+ def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
+ pass
+
+ def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
+ callback, *args, **kwargs):
+ pass
+
+
+class FetchVhdImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(FetchVhdImageTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = FakeSession()
+ self.instance = {"uuid": "uuid"}
+
+ self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+
+ self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ def _stub_glance_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized_with_retry')
+ func = self.session.call_plugin_serialized_with_retry(
+ 'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
+ extra_headers={'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'auth_token',
+ 'X-Roles': '',
+ 'X-Tenant-Id': None,
+ 'X-User-Id': None,
+ 'X-Identity-Status': 'Confirmed'},
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path')
+
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def _stub_bittorrent_download_vhd(self, raise_exc=None):
+ self.mox.StubOutWithMock(
+ self.session, 'call_plugin_serialized')
+ func = self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd',
+ image_id='image_id',
+ uuid_stack=["uuid_stack"],
+ sr_path='sr_path',
+ torrent_download_stall_cutoff=600,
+ torrent_listen_port_start=6881,
+ torrent_listen_port_end=6891,
+ torrent_max_last_accessed=86400,
+ torrent_max_seeder_processes_per_host=1,
+ torrent_seed_chance=1.0,
+ torrent_seed_duration=3600,
+ torrent_url='http://foo/image_id.torrent'
+ )
+ if raise_exc:
+ func.AndRaise(raise_exc)
+ else:
+ func.AndReturn({'root': {'uuid': 'vdi'}})
+
+ def test_fetch_vhd_image_works_with_glance(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(
+ self.context, self.session, self.instance, "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_works_with_bittorrent(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi").AndRaise(exception.FlavorDiskTooSmall)
+
+ self.mox.StubOutWithMock(self.session, 'call_xenapi')
+ self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
+
+ self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
+ vm_utils.destroy_vdi(self.session,
+ "ref").AndRaise(exception.StorageError(reason=""))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._fetch_vhd_image, self.context, self.session,
+ self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+ def test_fallback_to_default_handler(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(True)
+
+ self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
+
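+ # The fallback retries the download through the default (glance)
+ # handler, so a second round of uuid-stack and SR-path lookups is
+ # expected.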
+ vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
+ vm_utils.get_sr_path(self.session).AndReturn('sr_path')
+
+ self._stub_glance_download_vhd()
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr(self.session).AndReturn("sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_scan_sr')
+ vm_utils._scan_sr(self.session, "sr")
+
+ self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ "vdi")
+
+ self.mox.ReplayAll()
+
+ self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
+ self.session, self.instance, 'image_id')['root']['uuid'])
+
+ self.mox.VerifyAll()
+
+ def test_default_handler_does_not_fallback_to_itself(self):
+ cfg.CONF.import_opt('torrent_base_url',
+ 'nova.virt.xenapi.image.bittorrent',
+ group='xenserver')
+ self.flags(torrent_base_url='http://foo', group='xenserver')
+
+ self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
+ vm_utils._image_uses_bittorrent(
+ self.context, self.instance).AndReturn(False)
+
+ self._stub_glance_download_vhd(raise_exc=RuntimeError)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
+ self.context, self.session, self.instance, 'image_id')
+
+ self.mox.VerifyAll()
+
+
+class TestImageCompression(VMUtilsTestBase):
+ def test_image_compression(self):
+ # Test the config default, a too-low value (0), a negative value (-6)
+ # and a valid value (6).
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=0, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=-6, group='xenserver')
+ self.assertIsNone(vm_utils.get_compression_level())
+ self.flags(image_compression_level=6, group='xenserver')
+ self.assertEqual(vm_utils.get_compression_level(), 6)
+
+
+class ResizeHelpersTestCase(VMUtilsTestBase):
+ def test_repair_filesystem(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ utils.execute('e2fsck', '-f', "-y", "fakepath",
+ run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
+ ("size is: 42", ""))
+
+ self.mox.ReplayAll()
+
+ vm_utils._repair_filesystem("fakepath")
+
+ def _call_tune2fs_remove_journal(self, path):
+ utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
+
+ def _call_tune2fs_add_journal(self, path):
+ utils.execute("tune2fs", "-j", path, run_as_root=True)
+
+ def _call_parted_mkpart(self, path, start, end):
+ utils.execute('parted', '--script', path, 'rm', '1',
+ run_as_root=True)
+ utils.execute('parted', '--script', path, 'mkpart',
+ 'primary', '%ds' % start, '%ds' % end, run_as_root=True)
+
+ def _call_parted_boot_flag(self, path):
+ utils.execute('parted', '--script', path, 'set', '1',
+ 'boot', 'on', run_as_root=True)
+
+ def test_resize_part_and_fs_down_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
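+ # Shrinking: repair the filesystem, drop the journal, resize the
+ # filesystem down, recreate a smaller partition, restore the boot flag
+ # and re-add the journal, in that order.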
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
+ self._call_parted_mkpart(dev_path, 0, 9)
+ self._call_parted_boot_flag(dev_path)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
+
+ def test_log_progress_if_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ vm_utils.LOG.debug(_("Sparse copy in progress, "
+ "%(complete_pct).2f%% complete. "
+ "%(left)s bytes left to copy"),
+ {"complete_pct": 50.0, "left": 1})
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_log_progress_if_not_required(self):
+ self.mox.StubOutWithMock(vm_utils.LOG, "debug")
+ current = timeutils.utcnow()
+ timeutils.set_time_override(current)
+ timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
+ self.mox.ReplayAll()
+ vm_utils._log_progress_if_required(1, current, 2)
+
+ def test_resize_part_and_fs_down_fails_disk_too_big(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ new_sectors = 10
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ mobj = utils.execute("resize2fs",
+ partition_path,
+ "%ss" % new_sectors,
+ run_as_root=True)
+ mobj.AndRaise(processutils.ProcessExecutionError)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ResizeError,
+ vm_utils._resize_part_and_fs,
+ "fake", 0, 20, 10, "boot")
+
+ def test_resize_part_and_fs_up_succeeds(self):
+ self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ dev_path = "/dev/fake"
+ partition_path = "%s1" % dev_path
+ vm_utils._repair_filesystem(partition_path)
+ self._call_tune2fs_remove_journal(partition_path)
+ self._call_parted_mkpart(dev_path, 0, 29)
+ utils.execute("resize2fs", partition_path, run_as_root=True)
+ self._call_tune2fs_add_journal(partition_path)
+
+ self.mox.ReplayAll()
+
+ vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
+
+ def test_resize_disk_throws_on_zero_size(self):
+ self.assertRaises(exception.ResizeError,
+ vm_utils.resize_disk, "session", "instance", "vdi_ref",
+ {"root_gb": 0})
+
+ def test_auto_config_disk_returns_early_on_zero_size(self):
+ vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
+
+ @mock.patch.object(utils, "execute")
+ def test_get_partitions(self, mock_execute):
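+ # Fake parted machine-readable output; each line is
+ # num:start:end:size:filesystem:name:flags.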
+ parted_return = "BYT;\n...\n"
+ parted_return += "1:2s:11s:10s:ext3::boot;\n"
+ parted_return += "2:20s:11s:10s::bob:;\n"
+ mock_execute.return_value = (parted_return, None)
+
+ partitions = vm_utils._get_partitions("abc")
+
+ self.assertEqual(2, len(partitions))
+ self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
+ self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
+
+
+class CheckVDISizeTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CheckVDISizeTestCase, self).setUp()
+ self.context = 'fakecontext'
+ self.session = 'fakesession'
+ self.instance = dict(uuid='fakeinstance')
+ self.vdi_uuid = 'fakeuuid'
+
+ def test_not_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(1073741824)
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+ def test_too_large(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=1))
+
+ self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
+ vm_utils._get_vdi_chain_size(self.session,
+ self.vdi_uuid).AndReturn(11811160065) # 1GiB root + 10GiB allowed overhead, plus one byte
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ vm_utils._check_vdi_size, self.context, self.session,
+ self.instance, self.vdi_uuid)
+
+ def test_zero_root_gb_disables_check(self):
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ flavors.extract_flavor(self.instance).AndReturn(
+ dict(root_gb=0))
+
+ self.mox.ReplayAll()
+
+ vm_utils._check_vdi_size(self.context, self.session, self.instance,
+ self.vdi_uuid)
+
+
+class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GetInstanceForVdisForSrTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ def test_get_instance_vdis_for_sr(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ vdi_1 = fake.create_vdi('vdiname1', sr_ref)
+ vdi_2 = fake.create_vdi('vdiname2', sr_ref)
+
+ for vdi_ref in [vdi_1, vdi_2]:
+ fake.create_vbd(vm_ref, vdi_ref)
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([vdi_1, vdi_2], result)
+
+ def test_get_instance_vdis_for_sr_no_vbd(self):
+ vm_ref = fake.create_vm("foo", "Running")
+ sr_ref = fake.create_sr()
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.get_instance_vdis_for_sr(
+ driver._session, vm_ref, sr_ref))
+
+ self.assertEqual([], result)
+
+
+class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
+
+ def test_lookup_call(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn('ignored')
+
+ mock.ReplayAll()
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ mock.VerifyAll()
+
+ def test_return_value(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
+
+ mock.ReplayAll()
+ self.assertEqual(
+ 'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
+ mock.VerifyAll()
+
+
+class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
+
+ def test_exception_raised(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ self.assertRaises(
+ exception.InstanceNotFound,
+ lambda: vm_utils.vm_ref_or_raise('session', 'somename')
+ )
+ mock.VerifyAll()
+
+ def test_exception_msg_contains_vm_name(self):
+ mock = mox.Mox()
+ mock.StubOutWithMock(vm_utils, 'lookup')
+
+ vm_utils.lookup('session', 'somename').AndReturn(None)
+
+ mock.ReplayAll()
+ try:
+ vm_utils.vm_ref_or_raise('session', 'somename')
+ except exception.InstanceNotFound as e:
+ self.assertIn('somename', six.text_type(e))
+ mock.VerifyAll()
+
+
+@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
+class CreateCachedImageTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateCachedImageTestCase, self).setUp()
+ self.session = _get_fake_session()
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
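+ # Each entry in side_effect feeds one call_xenapi invocation made by
+ # _create_cached_image; the final 'vdi_uuid' is the uuid expected back in
+ # the returned vdis dict.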
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
+ def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
+ None, None, None, 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ def test_no_cow_no_ext(self, mock_safe_find_sr):
+ self.flags(use_cow_images=False)
+ self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
+ 'vdi_ref', None, None, None,
+ 'vdi_uuid']
+ self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+ @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
+ @mock.patch.object(vm_utils, '_fetch_image',
+ return_value={'root': {'uuid': 'vdi_uuid',
+ 'file': None}})
+ def test_noncached(self, mock_fetch_image, mock_clone_vdi,
+ mock_safe_find_sr):
+ self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
+ None, None, None, None, None,
+ None, 'vdi_uuid']
+ self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
+ vm_utils._create_cached_image('context', self.session,
+ 'instance', 'name', 'uuid',
+ vm_utils.ImageType.DISK_VHD))
+
+
+class BittorrentTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(BittorrentTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ def test_image_uses_bittorrent(self):
+ instance = {'system_metadata': {'image_bittorrent': True}}
+ self.flags(torrent_images='some', group='xenserver')
+ self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
+ instance))
+
+ def _test_create_image(self, cache_type):
+ instance = {'system_metadata': {'image_cache_in_nova': True}}
+ self.flags(cache_images=cache_type, group='xenserver')
+
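+ # cache_images='some' should route through _create_cached_image while
+ # 'none' should go straight to _fetch_image; the fakes below record
+ # which path was taken.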
+ was = {'called': None}
+
+ def fake_create_cached_image(*args):
+ was['called'] = 'some'
+ return (False, {})
+ self.stubs.Set(vm_utils, '_create_cached_image',
+ fake_create_cached_image)
+
+ def fake_fetch_image(*args):
+ was['called'] = 'none'
+ return {}
+ self.stubs.Set(vm_utils, '_fetch_image',
+ fake_fetch_image)
+
+ vm_utils.create_image(self.context, None, instance,
+ 'foo', 'bar', 'baz')
+
+ self.assertEqual(was['called'], cache_type)
+
+ def test_create_image_cached(self):
+ self._test_create_image('some')
+
+ def test_create_image_uncached(self):
+ self._test_create_image('none')
+
+
+class ShutdownTestCase(VMUtilsTestBase):
+
+ def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.hard_shutdown_vm(
+ session, instance, vm_ref))
+
+ def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
+ self.mock = mox.Mox()
+ session = FakeSession()
+ instance = "instance"
+ vm_ref = "vm-ref"
+ self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
+ self.mock.StubOutWithMock(vm_utils, 'LOG')
+ self.assertTrue(vm_utils.clean_shutdown_vm(
+ session, instance, vm_ref))
+
+
+class CreateVBDTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateVBDTestCase, self).setUp()
+ self.session = FakeSession()
+ self.mock = mox.Mox()
+ self.mock.StubOutWithMock(self.session, 'call_xenapi')
+ self.vbd_rec = self._generate_vbd_rec()
+
+ def _generate_vbd_rec(self):
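+ # Default VBD record that create_vbd is expected to hand to VBD.create;
+ # individual tests tweak fields before replaying.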
+ vbd_rec = {}
+ vbd_rec['VM'] = 'vm_ref'
+ vbd_rec['VDI'] = 'vdi_ref'
+ vbd_rec['userdevice'] = '0'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ return vbd_rec
+
+ def test_create_vbd_default_args(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_osvol(self):
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
+ "osvol", "True")
+ self.mock.ReplayAll()
+ result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
+ osvol=True)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_create_vbd_extra_args(self):
+ self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
+ self.vbd_rec['type'] = 'a'
+ self.vbd_rec['mode'] = 'RO'
+ self.vbd_rec['bootable'] = True
+ self.vbd_rec['empty'] = True
+ self.vbd_rec['unpluggable'] = False
+ self.session.call_xenapi('VBD.create',
+ self.vbd_rec).AndReturn("vbd_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
+ vbd_type="a", read_only=True, bootable=True,
+ empty=True, unpluggable=False)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+ def test_attach_cd(self):
+ self.mock.StubOutWithMock(vm_utils, 'create_vbd')
+
+ vm_utils.create_vbd(self.session, "vm_ref", None, 1,
+ vbd_type='cd', read_only=True, bootable=True,
+ empty=True, unpluggable=False).AndReturn("vbd_ref")
+ self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
+ self.mock.ReplayAll()
+
+ result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
+ self.assertEqual(result, "vbd_ref")
+ self.mock.VerifyAll()
+
+
+class UnplugVbdTestCase(VMUtilsTestBase):
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_works(self, mock_sleep):
+ session = _get_fake_session()
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
+
+ session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
+ self.assertEqual(0, mock_sleep.call_count)
+
+ def test_unplug_vbd_raises_unexpected_error(self):
+ session = _get_fake_session()
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+ session.call_xenapi.side_effect = test.TestingException()
+
+ self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
+ def test_unplug_vbd_already_detached_works(self):
+ error = "DEVICE_ALREADY_DETACHED"
+ session = _get_fake_session(error)
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
+ def test_unplug_vbd_raises_unexpected_xenapi_error(self):
+ session = _get_fake_session("")
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+ self.assertEqual(1, session.call_xenapi.call_count)
+
+ def _test_unplug_vbd_retries(self, mock_sleep, error):
+ session = _get_fake_session(error)
+ vbd_ref = "vbd_ref"
+ vm_ref = 'vm_ref'
+
+ self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
+ session, vbd_ref, vm_ref)
+
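+ # One initial attempt plus 10 retries, with a sleep between each retry.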
+ self.assertEqual(11, session.call_xenapi.call_count)
+ self.assertEqual(10, mock_sleep.call_count)
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
+ self._test_unplug_vbd_retries(mock_sleep,
+ "DEVICE_DETACH_REJECTED")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
+ self._test_unplug_vbd_retries(mock_sleep,
+ "INTERNAL_ERROR")
+
+
+class VDIOtherConfigTestCase(VMUtilsTestBase):
+ """Tests to ensure that the code is populating VDI's `other_config`
+ attribute with the correct metadta.
+ """
+
+ def setUp(self):
+ super(VDIOtherConfigTestCase, self).setUp()
+
+ class _FakeSession(object):
+ def call_xenapi(self, operation, *args, **kwargs):
+ # VDI.add_to_other_config -> VDI_add_to_other_config
+ method = getattr(self, operation.replace('.', '_'), None)
+ if method:
+ return method(*args, **kwargs)
+
+ self.operation = operation
+ self.args = args
+ self.kwargs = kwargs
+
+ self.session = _FakeSession()
+ self.context = context.get_admin_context()
+ self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
+ 'name': 'myinstance'}
+
+ def test_create_vdi(self):
+ # Some images are registered with XenServer explicitly by calling
+ # `create_vdi`
+ vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
+ 'myvdi', 'root', 1024, read_only=True)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, self.session.args[0]['other_config'])
+
+ def test_create_image(self):
+ # Other images are registered implicitly when they are dropped into
+ # the SR by a dom0 plugin or some other process
+ self.flags(cache_images='none', group='xenserver')
+
+ def fake_fetch_image(*args):
+ return {'root': {'uuid': 'fake-uuid'}}
+
+ self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+
+ vm_utils.create_image(self.context, self.session, self.fake_instance,
+ 'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+ def test_import_migrated_vhds(self):
+ # Migrated images should preserve the `other_config`
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ def call_plugin_serialized(*args, **kwargs):
+ return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+ self.session.call_plugin_serialized = call_plugin_serialized
+
+ self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
+ self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
+
+ vm_utils._import_migrated_vhds(self.session, self.fake_instance,
+ "disk_label", "root", "vdi_label")
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+
+class GenerateDiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GenerateDiskTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+ self.session.is_local_connection = False
+ self.vm_ref = fake.create_vm("foo", "Running")
+
+ def tearDown(self):
+ super(GenerateDiskTestCase, self).tearDown()
+ fake.destroy_vm(self.vm_ref)
+
+ def _expect_parted_calls(self):
+ self.mox.StubOutWithMock(utils, "execute")
+ self.mox.StubOutWithMock(utils, "trycmd")
+ self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
+ self.mox.StubOutWithMock(vm_utils.os.path, "exists")
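+ # A local dom0 connection needs kpartx to map the new partition and
+ # tolerates non-zero parted exit codes; a remote connection calls parted
+ # with strict exit-code checking.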
+ if self.session.is_local_connection:
+ utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
+ 'msdos', check_exit_code=False, run_as_root=True)
+ utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
+ 'primary', '0', '-0',
+ check_exit_code=False, run_as_root=True)
+ vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
+ utils.trycmd('kpartx', '-a', '/dev/fakedev',
+ discard_warnings=True, run_as_root=True)
+ else:
+ utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
+ 'msdos', check_exit_code=True, run_as_root=True)
+ utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
+ 'primary', '0', '-0',
+ check_exit_code=True, run_as_root=True)
+
+ def _check_vdi(self, vdi_ref, check_attached=True):
+ vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
+ self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
+ if check_attached:
+ vbd_ref = vdi_rec["VBDs"][0]
+ vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
+ self.assertEqual(self.vm_ref, vbd_rec['VM'])
+ else:
+ self.assertEqual(0, len(vdi_rec["VBDs"]))
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_with_no_fs_given(self):
+ self._expect_parted_calls()
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "user", 10, None)
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_swap(self):
+ self._expect_parted_calls()
+ utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "swap", 10, "linux-swap")
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ephemeral(self):
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
+ run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+ self._check_vdi(vdi_ref)
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ensure_cleanup_called(self):
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
+ run_as_root=True).AndRaise(test.TestingException)
+ vm_utils.destroy_vdi(self.session,
+ mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
+
+ self.mox.ReplayAll()
+ self.assertRaises(test.TestingException, vm_utils._generate_disk,
+ self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+
+ @test_xenapi.stub_vm_utils_with_vdi_attached_here
+ def test_generate_disk_ephemeral_local_not_attached(self):
+ self.session.is_local_connection = True
+ self._expect_parted_calls()
+ utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
+ run_as_root=True)
+
+ self.mox.ReplayAll()
+ vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
+ None, "2", "name", "ephemeral", 10, "ext4")
+ self._check_vdi(vdi_ref, check_attached=False)
+
+
+class GenerateEphemeralTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(GenerateEphemeralTestCase, self).setUp()
+ self.session = "session"
+ self.instance = "instance"
+ self.vm_ref = "vm_ref"
+ self.name_label = "name"
+ self.ephemeral_name_label = "name ephemeral"
+ self.userdevice = 4
+ self.mox.StubOutWithMock(vm_utils, "_generate_disk")
+ self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
+
+ def test_get_ephemeral_disk_sizes_simple(self):
+ result = vm_utils.get_ephemeral_disk_sizes(20)
+ expected = [20]
+ self.assertEqual(expected, list(result))
+
+ def test_get_ephemeral_disk_sizes_three_disks_2000(self):
+ result = vm_utils.get_ephemeral_disk_sizes(4030)
+ expected = [2000, 2000, 30]
+ self.assertEqual(expected, list(result))
+
+ def test_get_ephemeral_disk_sizes_two_disks_1024(self):
+ result = vm_utils.get_ephemeral_disk_sizes(2048)
+ expected = [1024, 1024]
+ self.assertEqual(expected, list(result))
+
+ def _expect_generate_disk(self, size, device, name_label):
+ vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ str(device), name_label, 'ephemeral',
+ size * 1024, None).AndReturn(device)
+
+ def test_generate_ephemeral_adds_one_disk(self):
+ self._expect_generate_disk(20, self.userdevice,
+ self.ephemeral_name_label)
+ self.mox.ReplayAll()
+
+ vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 20)
+
+ def test_generate_ephemeral_adds_multiple_disks(self):
+ self._expect_generate_disk(2000, self.userdevice,
+ self.ephemeral_name_label)
+ self._expect_generate_disk(2000, self.userdevice + 1,
+ self.ephemeral_name_label + " (1)")
+ self._expect_generate_disk(30, self.userdevice + 2,
+ self.ephemeral_name_label + " (2)")
+ self.mox.ReplayAll()
+
+ vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 4030)
+
+ def test_generate_ephemeral_cleans_up_on_error(self):
+ self._expect_generate_disk(1024, self.userdevice,
+ self.ephemeral_name_label)
+ self._expect_generate_disk(1024, self.userdevice + 1,
+ self.ephemeral_name_label + " (1)")
+
+ vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
+ units.Mi, None).AndRaise(exception.NovaException)
+
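+ # The two disks created so far (fake refs 4 and 5) must be cleaned up
+ # when generating the third one fails.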
+ vm_utils.safe_destroy_vdis(self.session, [4, 5])
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
+ self.session, self.instance, self.vm_ref,
+ str(self.userdevice), self.name_label, 4096)
+
+
+class FakeFile(object):
+ def __init__(self):
+ self._file_operations = []
+
+ def seek(self, offset):
+ self._file_operations.append((self.seek, offset))
+
+
+class StreamDiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ import __builtin__
+ super(StreamDiskTestCase, self).setUp()
+ self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
+ self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
+ self.mox.StubOutWithMock(vm_utils, '_write_partition')
+
+ # NOTE(matelakat): This might hide the fail reason, as test runners
+ # are unhappy with a mocked out open.
+ self.mox.StubOutWithMock(__builtin__, 'open')
+ self.image_service_func = self.mox.CreateMockAnything()
+
+ def test_non_ami(self):
+ fake_file = FakeFile()
+
+ vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
+ vm_utils.utils.temporary_chown(
+ 'some_path').AndReturn(contextified(None))
+ open('some_path', 'wb').AndReturn(contextified(fake_file))
+ self.image_service_func(fake_file)
+
+ self.mox.ReplayAll()
+
+ vm_utils._stream_disk("session", self.image_service_func,
+ vm_utils.ImageType.KERNEL, None, 'dev')
+
+ self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)
+
+ def test_ami_disk(self):
+ fake_file = FakeFile()
+
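+ # For an AMI-style disk a partition table is written first, so the image
+ # data is expected to be streamed in after the MBR (see the seek
+ # assertion below).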
+ vm_utils._write_partition("session", 100, 'dev')
+ vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
+ vm_utils.utils.temporary_chown(
+ 'some_path').AndReturn(contextified(None))
+ open('some_path', 'wb').AndReturn(contextified(fake_file))
+ self.image_service_func(fake_file)
+
+ self.mox.ReplayAll()
+
+ vm_utils._stream_disk("session", self.image_service_func,
+ vm_utils.ImageType.DISK, 100, 'dev')
+
+ self.assertEqual(
+ [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
+ fake_file._file_operations)
+
+
+class VMUtilsSRPath(VMUtilsTestBase):
+ def setUp(self):
+ super(VMUtilsSRPath, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+ self.session.is_local_connection = False
+
+ def test_defined(self):
+ self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
+ self.mox.StubOutWithMock(self.session, "call_xenapi")
+
+ vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
+ self.session.host_ref = "host_ref"
+ self.session.call_xenapi('PBD.get_all_records_where',
+ 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
+ {'pbd_ref': {'device_config': {'path': 'sr_path'}}})
+
+ self.mox.ReplayAll()
+ self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")
+
+ def test_default(self):
+ self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
+ self.mox.StubOutWithMock(self.session, "call_xenapi")
+
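+ # When the PBD carries no explicit path in its device_config,
+ # get_sr_path should fall back to /var/run/sr-mount/<SR uuid>.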
+ vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
+ self.session.host_ref = "host_ref"
+ self.session.call_xenapi('PBD.get_all_records_where',
+ 'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
+ {'pbd_ref': {'device_config': {}}})
+ self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
+ {'uuid': 'sr_uuid', 'type': 'ext'})
+ self.mox.ReplayAll()
+ self.assertEqual(vm_utils.get_sr_path(self.session),
+ "/var/run/sr-mount/sr_uuid")
+
+
+class CreateKernelRamdiskTestCase(VMUtilsTestBase):
+ def setUp(self):
+ super(CreateKernelRamdiskTestCase, self).setUp()
+ self.context = "context"
+ self.session = FakeSession()
+ self.instance = {"kernel_id": None, "ramdisk_id": None}
+ self.name_label = "name"
+ self.mox.StubOutWithMock(self.session, "call_plugin")
+ self.mox.StubOutWithMock(uuid, "uuid4")
+ self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
+
+ def test_create_kernel_and_ramdisk_no_create(self):
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual((None, None), result)
+
+ def test_create_kernel_and_ramdisk_create_both_cached(self):
+ kernel_id = "kernel"
+ ramdisk_id = "ramdisk"
+ self.instance["kernel_id"] = kernel_id
+ self.instance["ramdisk_id"] = ramdisk_id
+
+ args_kernel = {}
+ args_kernel['cached-image'] = kernel_id
+ args_kernel['new-image-uuid'] = "fake_uuid1"
+ uuid.uuid4().AndReturn("fake_uuid1")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_kernel).AndReturn("k")
+
+ args_ramdisk = {}
+ args_ramdisk['cached-image'] = ramdisk_id
+ args_ramdisk['new-image-uuid'] = "fake_uuid2"
+ uuid.uuid4().AndReturn("fake_uuid2")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_ramdisk).AndReturn("r")
+
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual(("k", "r"), result)
+
+ def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
+ kernel_id = "kernel"
+ self.instance["kernel_id"] = kernel_id
+
+ args_kernel = {}
+ args_kernel['cached-image'] = kernel_id
+ args_kernel['new-image-uuid'] = "fake_uuid1"
+ uuid.uuid4().AndReturn("fake_uuid1")
+ self.session.call_plugin('kernel', 'create_kernel_ramdisk',
+ args_kernel).AndReturn("")
+
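+ # An empty response from the dom0 plugin means there is no cached copy,
+ # so the kernel is expected to be fetched via _fetch_disk_image instead.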
+ kernel = {"kernel": {"file": "k"}}
+ vm_utils._fetch_disk_image(self.context, self.session, self.instance,
+ self.name_label, kernel_id, 0).AndReturn(kernel)
+
+ self.mox.ReplayAll()
+ result = vm_utils.create_kernel_and_ramdisk(self.context,
+ self.session, self.instance, self.name_label)
+ self.assertEqual(("k", None), result)
+
+
+class ScanSrTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, "_scan_sr")
+ @mock.patch.object(vm_utils, "safe_find_sr")
+ def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
+ mock_safe_find_sr.return_value = "sr_ref"
+
+ self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
+
+ mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
+
+ def test_scan_sr_works(self):
+ session = mock.Mock()
+ vm_utils._scan_sr(session, "sr_ref")
+ session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
+
+ def test_scan_sr_unknown_error_fails_once(self):
+ session = mock.Mock()
+ session.call_xenapi.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ vm_utils._scan_sr, session, "sr_ref")
+ session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
+ session = mock.Mock()
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+
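+ # SR_BACKEND_FAILURE_40 is retried with exponential backoff (2, 4, 8s)
+ # before the failure is finally re-raised.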
+ self.assertRaises(FakeException,
+ vm_utils._scan_sr, session, "sr_ref")
+
+ session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
+ self.assertEqual(4, session.call_xenapi.call_count)
+ mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
+ session = mock.Mock()
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session.XenAPI.Failure = FakeException
+
+ def fake_call_xenapi(*args):
+ fake_call_xenapi.count += 1
+ if fake_call_xenapi.count != 2:
+ raise FakeException()
+
+ fake_call_xenapi.count = 0
+ session.call_xenapi.side_effect = fake_call_xenapi
+
+ vm_utils._scan_sr(session, "sr_ref")
+
+ session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
+ self.assertEqual(2, session.call_xenapi.call_count)
+ mock_sleep.assert_called_once_with(2)
+
+
+@mock.patch.object(flavors, 'extract_flavor',
+ return_value={
+ 'memory_mb': 1024,
+ 'vcpus': 1,
+ 'vcpu_weight': 1.0,
+ })
+class CreateVmTestCase(VMUtilsTestBase):
+ def test_vss_provider(self, mock_extract):
+ self.flags(vcpu_pin_set="2,3")
+ session = _get_fake_session()
+ instance = {
+ "uuid": "uuid", "os_type": "windows"
+ }
+
+ vm_utils.create_vm(session, instance, "label",
+ "kernel", "ramdisk")
+
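+ # For a Windows guest the VSS provider is disabled via xenstore_data,
+ # and vcpu_pin_set shows up as the VCPUs mask in the record passed to
+ # VM.create.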
+ vm_rec = {
+ 'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1.0'},
+ 'PV_args': '',
+ 'memory_static_min': '0',
+ 'ha_restart_priority': '',
+ 'HVM_boot_policy': 'BIOS order',
+ 'PV_bootloader': '', 'tags': [],
+ 'VCPUs_max': '1',
+ 'memory_static_max': '1073741824',
+ 'actions_after_shutdown': 'destroy',
+ 'memory_dynamic_max': '1073741824',
+ 'user_version': '0',
+ 'xenstore_data': {'vm-data/allowvssprovider': 'false'},
+ 'blocked_operations': {},
+ 'is_a_template': False,
+ 'name_description': '',
+ 'memory_dynamic_min': '1073741824',
+ 'actions_after_crash': 'destroy',
+ 'memory_target': '1073741824',
+ 'PV_ramdisk': '',
+ 'PV_bootloader_args': '',
+ 'PCI_bus': '',
+ 'other_config': {'nova_uuid': 'uuid'},
+ 'name_label': 'label',
+ 'actions_after_reboot': 'restart',
+ 'VCPUs_at_startup': '1',
+ 'HVM_boot_params': {'order': 'dc'},
+ 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
+ 'timeoffset': '0', 'viridian': 'true',
+ 'acpi': 'true'},
+ 'PV_legacy_args': '',
+ 'PV_kernel': '',
+ 'affinity': '',
+ 'recommendations': '',
+ 'ha_always_run': False
+ }
+ session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
+
+ def test_invalid_cpu_mask_raises(self, mock_extract):
+ self.flags(vcpu_pin_set="asdf")
+ session = mock.Mock()
+ instance = {
+ "uuid": "uuid",
+ }
+ self.assertRaises(exception.Invalid,
+ vm_utils.create_vm,
+ session, instance, "label",
+ "kernel", "ramdisk")
+
+ def test_destroy_vm(self, mock_extract):
+ session = mock.Mock()
+ instance = {
+ "uuid": "uuid",
+ }
+
+ vm_utils.destroy_vm(session, instance, "vm_ref")
+
+ session.VM.destroy.assert_called_once_with("vm_ref")
+
+ def test_destroy_vm_silently_fails(self, mock_extract):
+ session = mock.Mock()
+ exc = test.TestingException()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.destroy.side_effect = exc
+ instance = {
+ "uuid": "uuid",
+ }
+
+ vm_utils.destroy_vm(session, instance, "vm_ref")
+
+ session.VM.destroy.assert_called_once_with("vm_ref")
+
+
+class DetermineVmModeTestCase(VMUtilsTestBase):
+ def test_determine_vm_mode_returns_xen_mode(self):
+ instance = {"vm_mode": "xen"}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_mode(self):
+ instance = {"vm_mode": "hvm"}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_xen_for_linux(self):
+ instance = {"vm_mode": None, "os_type": "linux"}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_for_windows(self):
+ instance = {"vm_mode": None, "os_type": "windows"}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_hvm_by_default(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.HVM,
+ vm_utils.determine_vm_mode(instance, None))
+
+ def test_determine_vm_mode_returns_xen_for_VHD(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
+
+ def test_determine_vm_mode_returns_xen_for_DISK(self):
+ instance = {"vm_mode": None, "os_type": None}
+ self.assertEqual(vm_mode.XEN,
+ vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
+
+
+class CallXenAPIHelpersTestCase(VMUtilsTestBase):
+ def test_vm_get_vbd_refs(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
+ session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
+
+ def test_vbd_get_rec(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
+ session.call_xenapi.assert_called_once_with("VBD.get_record",
+ "vbd_ref")
+
+ def test_vdi_get_rec(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
+ session.call_xenapi.assert_called_once_with("VDI.get_record",
+ "vdi_ref")
+
+ def test_vdi_snapshot(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "foo"
+ self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
+ session.call_xenapi.assert_called_once_with("VDI.snapshot",
+ "vdi_ref", {})
+
+ def test_vdi_get_virtual_size(self):
+ session = mock.Mock()
+ session.call_xenapi.return_value = "123"
+ self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
+ session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
+ "ref")
+
+ @mock.patch.object(vm_utils, '_get_resize_func_name')
+ def test_vdi_resize(self, mock_get_resize_func_name):
+ session = mock.Mock()
+ mock_get_resize_func_name.return_value = "VDI.fake"
+ vm_utils._vdi_resize(session, "ref", 123)
+ session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
+ mock_get_size.return_value = (1024 ** 3) - 1
+ instance = {"uuid": "a"}
+
+ vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
+ mock_resize):
+ mock_get_size.return_value = 1024 ** 3
+ instance = {"uuid": "a"}
+
+ vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, '_vdi_resize')
+ @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
+ def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
+ mock_resize):
+ mock_get_size.return_value = 1024 ** 3 + 1
+ instance = {"uuid": "a"}
+
+ self.assertRaises(exception.ResizeError,
+ vm_utils.update_vdi_virtual_size,
+ "s", instance, "ref", 1)
+
+ mock_get_size.assert_called_once_with("s", "ref")
+ self.assertFalse(mock_resize.called)
+
+
+@mock.patch.object(vm_utils, '_vdi_get_rec')
+@mock.patch.object(vm_utils, '_vbd_get_rec')
+@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
+class GetVdiForVMTestCase(VMUtilsTestBase):
+ def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_rec):
+ session = "session"
+
+ vm_get_vbd_refs.return_value = ["a", "b"]
+ vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
+ vdi_get_rec.return_value = {}
+
+ result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
+ self.assertEqual(('vdi_ref', {}), result)
+
+ vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
+ vbd_get_rec.assert_called_once_with(session, "a")
+ vdi_get_rec.assert_called_once_with(session, "vdi_ref")
+
+ def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_rec):
+ session = "session"
+
+ vm_get_vbd_refs.return_value = ["a", "b"]
+ vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
+
+ self.assertRaises(exception.NovaException,
+ vm_utils.get_vdi_for_vm_safely,
+ session, "vm_ref", userdevice='1')
+
+ self.assertEqual([], vdi_get_rec.call_args_list)
+ self.assertEqual(2, len(vbd_get_rec.call_args_list))
+
+
+@mock.patch.object(vm_utils, '_vdi_get_uuid')
+@mock.patch.object(vm_utils, '_vbd_get_rec')
+@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
+class GetAllVdiForVMTestCase(VMUtilsTestBase):
+ def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ def fake_vbd_get_rec(session, vbd_ref):
+ return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
+
+ def fake_vdi_get_uuid(session, vdi_ref):
+ return vdi_ref
+
+ vm_get_vbd_refs.return_value = ["0", "2"]
+ vbd_get_rec.side_effect = fake_vbd_get_rec
+ vdi_get_uuid.side_effect = fake_vdi_get_uuid
+
+ def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid)
+
+ result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
+ expected = ['vdi_ref_0', 'vdi_ref_2']
+ self.assertEqual(expected, list(result))
+
+ def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid):
+ self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
+ vbd_get_rec, vdi_get_uuid)
+
+ result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
+ min_userdevice=1)
+ expected = ["vdi_ref_2"]
+ self.assertEqual(expected, list(result))
+
+
+class GetAllVdisTestCase(VMUtilsTestBase):
+ def test_get_all_vdis_in_sr(self):
+
+ def fake_get_rec(record_type, ref):
+ if ref == "2":
+ return "vdi_rec_2"
+
+ session = mock.Mock()
+ session.call_xenapi.return_value = ["1", "2"]
+ session.get_rec.side_effect = fake_get_rec
+
+ sr_ref = "sr_ref"
+ actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
+ self.assertEqual(actual, [('2', 'vdi_rec_2')])
+
+ session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
+
+
+class VDIAttachedHere(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, 'destroy_vbd')
+ @mock.patch.object(vm_utils, '_get_this_vm_ref')
+ @mock.patch.object(vm_utils, 'create_vbd')
+ @mock.patch.object(vm_utils, '_remap_vbd_dev')
+ @mock.patch.object(vm_utils, '_wait_for_device')
+ @mock.patch.object(utils, 'execute')
+ def test_sync_called(self, mock_execute, mock_wait_for_device,
+ mock_remap_vbd_dev, mock_create_vbd,
+ mock_get_this_vm_ref, mock_destroy_vbd):
+ session = _get_fake_session()
+ with vm_utils.vdi_attached_here(session, 'vdi_ref'):
+ pass
+ mock_execute.assert_called_with('sync', run_as_root=True)
+
+
+class SnapshotAttachedHereTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
+ def test_snapshot_attached_here(self, mock_impl):
+ def fake_impl(session, instance, vm_ref, label, userdevice,
+ post_snapshot_callback):
+ self.assertEqual("session", session)
+ self.assertEqual("instance", instance)
+ self.assertEqual("vm_ref", vm_ref)
+ self.assertEqual("label", label)
+ self.assertEqual('0', userdevice)
+ self.assertIsNone(post_snapshot_callback)
+ yield "fake"
+
+ mock_impl.side_effect = fake_impl
+
+ with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
+ "label") as result:
+ self.assertEqual("fake", result)
+
+ mock_impl.assert_called_once_with("session", "instance", "vm_ref",
+ "label", '0', None)
+
+ @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
+ @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+ @mock.patch.object(vm_utils, '_walk_vdi_chain')
+ @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
+ @mock.patch.object(vm_utils, '_vdi_get_uuid')
+ @mock.patch.object(vm_utils, '_vdi_snapshot')
+ @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+ def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
+ mock_vdi_snapshot, mock_vdi_get_uuid,
+ mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
+ mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
+ session = "session"
+ instance = {"uuid": "uuid"}
+ mock_callback = mock.Mock()
+
+ mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
+ {"SR": "sr_ref",
+ "uuid": "vdi_uuid"})
+ mock_vdi_snapshot.return_value = "snap_ref"
+ mock_vdi_get_uuid.return_value = "snap_uuid"
+ mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
+
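+ # Raise inside the context manager to check that the snapshot VDI is
+ # still destroyed and the VDI chain cleaned up on error.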
+ try:
+ with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
+ "label", '2', mock_callback) as result:
+ self.assertEqual(["a", "b"], result)
+ raise test.TestingException()
+ self.fail("TestingException should have propagated")
+ except test.TestingException:
+ pass
+
+ mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
+ '2')
+ mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
+ mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
+ "sr_ref", "vdi_ref", ['a', 'b'])
+ mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
+ mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
+ mock.call(session, "snap_uuid")])
+ mock_callback.assert_called_once_with(
+ task_state="image_pending_upload")
+ mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
+ mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
+ instance, ['a', 'b'], "sr_ref")
+
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
+ instance = {"uuid": "fake"}
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid"])
+ self.assertFalse(mock_sleep.called)
+
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
+ mock_count):
+ mock_count.return_value = 2
+ instance = {"uuid": "fake"}
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertFalse(mock_sleep.called)
+ self.assertTrue(mock_count.called)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+
+ self.assertRaises(exception.NovaException,
+ vm_utils._wait_for_vhd_coalesce, "session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertTrue(mock_count.called)
+ self.assertEqual(20, mock_sleep.call_count)
+ self.assertEqual(20, mock_scan_sr.call_count)
+
+ @mock.patch.object(greenthread, 'sleep')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, '_count_children')
+ @mock.patch.object(vm_utils, '_scan_sr')
+ def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
+ mock_count, mock_get_vhd_parent_uuid, mock_sleep):
+ mock_count.return_value = 1
+ instance = {"uuid": "fake"}
+ mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
+
+ vm_utils._wait_for_vhd_coalesce("session", instance,
+ "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
+
+ self.assertEqual(1, mock_sleep.call_count)
+ self.assertEqual(2, mock_scan_sr.call_count)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_count_children(self, mock_get_all_vdis_in_sr):
+ vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
+ ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
+ ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
+ mock_get_all_vdis_in_sr.return_value = vdis
+ self.assertEqual(2, vm_utils._count_children('session',
+ 'parent1', 'sr'))
+
+
+class ImportMigratedDisksTestCase(VMUtilsTestBase):
+ @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
+ @mock.patch.object(vm_utils, '_import_migrated_root_disk')
+ def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
+ session = "session"
+ instance = "instance"
+ mock_root.return_value = "root_vdi"
+ mock_ephemeral.return_value = ["a", "b"]
+
+ result = vm_utils.import_all_migrated_disks(session, instance)
+
+ expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
+ self.assertEqual(expected, result)
+ mock_root.assert_called_once_with(session, instance)
+ mock_ephemeral.assert_called_once_with(session, instance)
+
+ @mock.patch.object(vm_utils, '_import_migrated_vhds')
+ def test_import_migrated_root_disk(self, mock_migrate):
+ mock_migrate.return_value = "foo"
+ instance = {"uuid": "uuid", "name": "name"}
+
+ result = vm_utils._import_migrated_root_disk("s", instance)
+
+ self.assertEqual("foo", result)
+ mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
+ "name")
+
+ @mock.patch.object(vm_utils, '_import_migrated_vhds')
+ def test_import_migrate_ephemeral_disks(self, mock_migrate):
+ mock_migrate.return_value = "foo"
+ instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}
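+        # ephemeral_gb is large enough to be split across two disks, so two
+        # chains are imported, keyed by userdevices 4 and 5.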
+
+ result = vm_utils._import_migrate_ephemeral_disks("s", instance)
+
+ self.assertEqual({'4': 'foo', '5': 'foo'}, result)
+ expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
+ "ephemeral", "name ephemeral (1)"),
+ mock.call("s", instance, "uuid_ephemeral_2",
+ "ephemeral", "name ephemeral (2)")]
+ self.assertEqual(expected_calls, mock_migrate.call_args_list)
+
+ @mock.patch.object(vm_utils, '_set_vdi_info')
+ @mock.patch.object(vm_utils, 'scan_default_sr')
+ @mock.patch.object(vm_utils, 'get_sr_path')
+ def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
+ mock_set_info):
+ session = mock.Mock()
+ instance = {"uuid": "uuid"}
+ session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
+ session.call_xenapi.return_value = "vdi_ref"
+ mock_get_sr_path.return_value = "sr_path"
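+        # The plugin moves the VHD chain into the default SR; the returned
+        # uuid is then resolved to a VDI ref and relabelled.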
+
+ result = vm_utils._import_migrated_vhds(session, instance,
+ 'chain_label', 'disk_type', 'vdi_label')
+
+ expected = {'uuid': "a", 'ref': "vdi_ref"}
+ self.assertEqual(expected, result)
+ mock_get_sr_path.assert_called_once_with(session)
+ session.call_plugin_serialized.assert_called_once_with('migration',
+ 'move_vhds_into_sr', instance_uuid='chain_label',
+ sr_path='sr_path', uuid_stack=mock.ANY)
+ mock_scan_sr.assert_called_once_with(session)
+ session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
+ mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
+ 'vdi_label', 'disk_type', instance)
+
+ def test_get_vhd_parent_uuid_rec_provided(self):
+ session = mock.Mock()
+ vdi_ref = 'vdi_ref'
+ vdi_rec = {'sm_config': {}}
+ self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
+ vdi_ref,
+ vdi_rec))
+ self.assertFalse(session.call_xenapi.called)
+
+
+class MigrateVHDTestCase(VMUtilsTestBase):
+ def _assert_transfer_called(self, session, label):
+ session.call_plugin_serialized.assert_called_once_with(
+ 'migration', 'transfer_vhd', instance_uuid=label, host="dest",
+ vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
+
+ def test_migrate_vhd_root(self):
+ session = mock.Mock()
+ instance = {"uuid": "a"}
+
+ vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
+ "sr_path", 2)
+
+ self._assert_transfer_called(session, "a")
+
+ def test_migrate_vhd_ephemeral(self):
+ session = mock.Mock()
+ instance = {"uuid": "a"}
+
+ vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
+ "sr_path", 2, 2)
+
+ self._assert_transfer_called(session, "a_ephemeral_2")
+
+ def test_migrate_vhd_converts_exceptions(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.call_plugin_serialized.side_effect = test.TestingException()
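+        # A XenAPI failure from the transfer plugin call must be converted
+        # into a MigrationError instead of leaking the raw exception.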
+ instance = {"uuid": "a"}
+
+ self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
+ session, instance, "vdi_uuid", "dest", "sr_path", 2)
+ self._assert_transfer_called(session, "a")
+
+
+class StripBaseMirrorTestCase(VMUtilsTestBase):
+ def test_strip_base_mirror_from_vdi_works(self):
+ session = mock.Mock()
+ vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
+ session.call_xenapi.assert_called_once_with(
+ "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
+
+ def test_strip_base_mirror_from_vdi_hides_error(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.call_xenapi.side_effect = test.TestingException()
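+        # Stripping base_mirror is best effort: failures are swallowed, but
+        # the call itself must still be attempted.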
+
+ vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
+
+ session.call_xenapi.assert_called_once_with(
+ "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
+
+ @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
+ def test_strip_base_mirror_from_vdis(self, mock_strip):
+ def call_xenapi(method, arg):
+ if method == "VM.get_VBDs":
+ return ['VBD_ref_1', 'VBD_ref_2']
+ if method == "VBD.get_VDI":
+ return 'VDI' + arg[3:]
+ return "Unexpected call_xenapi: %s.%s" % (method, arg)
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = call_xenapi
+
+ vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
+
+ expected = [mock.call('VM.get_VBDs', "vm_ref"),
+ mock.call('VBD.get_VDI', "VBD_ref_1"),
+ mock.call('VBD.get_VDI', "VBD_ref_2")]
+ self.assertEqual(expected, session.call_xenapi.call_args_list)
+
+ expected = [mock.call(session, "VDI_ref_1"),
+ mock.call(session, "VDI_ref_2")]
+ self.assertEqual(expected, mock_strip.call_args_list)
+
+
+class DeviceIdTestCase(VMUtilsTestBase):
+ def test_device_id_is_none_if_not_specified_in_meta_data(self):
+ image_meta = {}
+ session = mock.Mock()
+ session.product_version = (6, 1, 0)
+ self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
+
+ def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
+ image_meta = {'xenapi_device_id': '0002'}
+ session = mock.Mock()
+ session.product_version = (6, 2, 0)
+ self.assertEqual('0002',
+ vm_utils.get_vm_device_id(session, image_meta))
+ session.product_version = (6, 3, 1)
+ self.assertEqual('0002',
+ vm_utils.get_vm_device_id(session, image_meta))
+
+ def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
+ image_meta = {'xenapi_device_id': '0002'}
+ session = mock.Mock()
+ session.product_version = (6, 0)
+ exc = self.assertRaises(exception.NovaException,
+ vm_utils.get_vm_device_id, session, image_meta)
+ self.assertEqual("Device id 0002 specified is not supported by "
+ "hypervisor version (6, 0)", exc.message)
+        session.product_version = '6a'
+ exc = self.assertRaises(exception.NovaException,
+ vm_utils.get_vm_device_id, session, image_meta)
+ self.assertEqual("Device id 0002 specified is not supported by "
+ "hypervisor version 6a", exc.message)
+
+
+class CreateVmRecordTestCase(VMUtilsTestBase):
+ @mock.patch.object(flavors, 'extract_flavor')
+ def test_create_vm_record_linux(self, mock_extract_flavor):
+ instance = {"uuid": "uuid123", "os_type": "linux"}
+ self._test_create_vm_record(mock_extract_flavor, instance, False)
+
+ @mock.patch.object(flavors, 'extract_flavor')
+ def test_create_vm_record_windows(self, mock_extract_flavor):
+ instance = {"uuid": "uuid123", "os_type": "windows"}
+ self._test_create_vm_record(mock_extract_flavor, instance, True)
+
+ def _test_create_vm_record(self, mock_extract_flavor, instance,
+ is_viridian):
+ session = _get_fake_session()
+ flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
+ mock_extract_flavor.return_value = flavor
+
+ vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
+ device_id="0002")
+
+ is_viridian_str = str(is_viridian).lower()
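+        # viridian (Windows enlightenments) should only be enabled for
+        # Windows guests; it shows up below as a lowercase string flag.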
+
+ expected_vm_rec = {
+ 'VCPUs_params': {'cap': '0', 'weight': '2'},
+ 'PV_args': '',
+ 'memory_static_min': '0',
+ 'ha_restart_priority': '',
+ 'HVM_boot_policy': 'BIOS order',
+ 'PV_bootloader': '',
+ 'tags': [],
+ 'VCPUs_max': '1',
+ 'memory_static_max': '1073741824',
+ 'actions_after_shutdown': 'destroy',
+ 'memory_dynamic_max': '1073741824',
+ 'user_version': '0',
+ 'xenstore_data': {'vm-data/allowvssprovider': 'false'},
+ 'blocked_operations': {},
+ 'is_a_template': False,
+ 'name_description': '',
+ 'memory_dynamic_min': '1073741824',
+ 'actions_after_crash': 'destroy',
+ 'memory_target': '1073741824',
+ 'PV_ramdisk': '',
+ 'PV_bootloader_args': '',
+ 'PCI_bus': '',
+ 'other_config': {'nova_uuid': 'uuid123'},
+ 'name_label': 'name',
+ 'actions_after_reboot': 'restart',
+ 'VCPUs_at_startup': '1',
+ 'HVM_boot_params': {'order': 'dc'},
+ 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
+ 'timeoffset': '0', 'viridian': is_viridian_str,
+ 'acpi': 'true', 'device_id': '0002'},
+ 'PV_legacy_args': '',
+ 'PV_kernel': '',
+ 'affinity': '',
+ 'recommendations': '',
+ 'ha_always_run': False}
+
+ session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
+
+ def test_list_vms(self):
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ fake.create_vm("foo1", "Halted")
+ vm_ref = fake.create_vm("foo2", "Running")
+
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+
+ result = list(vm_utils.list_vms(driver._session))
+
+        # Three VMs will exist, but one is Dom0 and one is not running,
+        # so only one should be listed
+ self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
+ self.assertEqual(len(result), 1)
+
+ result_keys = [key for (key, value) in result]
+
+ self.assertIn(vm_ref, result_keys)
+
+
+class ChildVHDsTestCase(test.NoDBTestCase):
+ all_vdis = [
+ ("my-vdi-ref",
+ {"uuid": "my-uuid", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("non-parent",
+ {"uuid": "uuid-1", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("diff-parent",
+ {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child",
+ {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child-snap",
+ {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": True, "other_config": {}}),
+ ]
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_defaults(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
+
+ self.assertEqual(['uuid-child', 'uuid-child-snap'], result)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_only_snapshots(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
+ old_snapshots_only=True)
+
+ self.assertEqual(['uuid-child-snap'], result)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_chain(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref",
+ ["my-uuid", "other-uuid"], old_snapshots_only=True)
+
+ self.assertEqual(['uuid-child-snap'], result)
+
+ def test_is_vdi_a_snapshot_works(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {}}
+
+ self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_base_images_false(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {"image-id": "fake"}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
+ vdi_rec = {"is_a_snapshot": False,
+ "other_config": {}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+
+class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+ @mock.patch.object(vm_utils, '_walk_vdi_chain')
+ @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
+ def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
+ instance = {"uuid": "fake"}
+ mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
+ mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
+
+ vm_utils.remove_old_snapshots("session", instance, "vm_ref")
+
+ mock_delete.assert_called_once_with("session", instance,
+ ["uuid1", "uuid2"], "sr_ref")
+ mock_get.assert_called_once_with("session", "vm_ref")
+ mock_walk.assert_called_once_with("session", "vdi")
+
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
+ instance = {"uuid": "fake"}
+
+ vm_utils._delete_snapshots_in_vdi_chain("session", instance,
+ ["uuid"], "sr")
+
+ self.assertFalse(mock_child.called)
+
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
+ instance = {"uuid": "fake"}
+ mock_child.return_value = []
+
+ vm_utils._delete_snapshots_in_vdi_chain("session", instance,
+ ["uuid1", "uuid2"], "sr")
+
+ mock_child.assert_called_once_with("session", "sr", ["uuid2"],
+ old_snapshots_only=True)
+
+ @mock.patch.object(vm_utils, '_scan_sr')
+ @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+ @mock.patch.object(vm_utils, '_child_vhds')
+ def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
+ mock_destroy, mock_scan):
+ instance = {"uuid": "fake"}
+ mock_child.return_value = ["suuid1", "suuid2"]
+ session = mock.Mock()
+ session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
+
+ vm_utils._delete_snapshots_in_vdi_chain(session, instance,
+ ["uuid1", "uuid2"], "sr")
+
+ mock_child.assert_called_once_with(session, "sr", ["uuid2"],
+ old_snapshots_only=True)
+ session.VDI.get_by_uuid.assert_has_calls([
+ mock.call("suuid1"), mock.call("suuid2")])
+ mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
+ mock_scan.assert_called_once_with(session, "sr")
+
+
+class ResizeFunctionTestCase(test.NoDBTestCase):
+ def _call_get_resize_func_name(self, brand, version):
+ session = mock.Mock()
+ session.product_brand = brand
+ session.product_version = version
+
+ return vm_utils._get_resize_func_name(session)
+
+ def _test_is_resize(self, brand, version):
+ result = self._call_get_resize_func_name(brand, version)
+ self.assertEqual("VDI.resize", result)
+
+ def _test_is_resize_online(self, brand, version):
+ result = self._call_get_resize_func_name(brand, version)
+ self.assertEqual("VDI.resize_online", result)
+
+ def test_xenserver_5_5(self):
+ self._test_is_resize_online("XenServer", (5, 5, 0))
+
+ def test_xenserver_6_0(self):
+ self._test_is_resize("XenServer", (6, 0, 0))
+
+ def test_xcp_1_1(self):
+ self._test_is_resize_online("XCP", (1, 1, 0))
+
+ def test_xcp_1_2(self):
+ self._test_is_resize("XCP", (1, 2, 0))
+
+ def test_xcp_2_0(self):
+ self._test_is_resize("XCP", (2, 0, 0))
+
+ def test_random_brand(self):
+ self._test_is_resize("asfd", (1, 1, 0))
+
+ def test_default(self):
+ self._test_is_resize(None, None)
+
+ def test_empty(self):
+ self._test_is_resize("", "")
+
+ def test_bad_version(self):
+ self._test_is_resize("XenServer", "asdf")
+
+
+class VMInfoTests(VMUtilsTestBase):
+ def setUp(self):
+ super(VMInfoTests, self).setUp()
+ self.session = mock.Mock()
+
+ def test_get_power_state_valid(self):
+ # Save on test setup calls by having these simple tests in one method
+ self.session.call_xenapi.return_value = "Running"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.RUNNING)
+
+ self.session.call_xenapi.return_value = "Halted"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.SHUTDOWN)
+
+ self.session.call_xenapi.return_value = "Paused"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.PAUSED)
+
+ self.session.call_xenapi.return_value = "Suspended"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.SUSPENDED)
+
+ self.session.call_xenapi.return_value = "Crashed"
+ self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
+ power_state.CRASHED)
+
+ def test_get_power_state_invalid(self):
+ self.session.call_xenapi.return_value = "Invalid"
+ self.assertRaises(KeyError,
+ vm_utils.get_power_state, self.session, "ref")
+
+ _XAPI_record = {'power_state': 'Running',
+ 'memory_static_max': str(10 << 10),
+ 'memory_dynamic_max': str(9 << 10),
+ 'VCPUs_max': '5'}
+
+ def test_compile_info(self):
+
+ def call_xenapi(method, *args):
+ if method.startswith('VM.get_') and args[0] == 'dummy':
+ return self._XAPI_record[method[7:]]
+
+ self.session.call_xenapi.side_effect = call_xenapi
+
+ expected = {'state': power_state.RUNNING,
+ 'max_mem': 10L,
+ 'mem': 9L,
+ 'num_cpu': '5',
+ 'cpu_time': 0}
+
+ self.assertEqual(vm_utils.compile_info(self.session, "dummy"),
+ expected)
diff --git a/nova/tests/unit/virt/xenapi/test_vmops.py b/nova/tests/unit/virt/xenapi/test_vmops.py
new file mode 100644
index 0000000000..8140f997d2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vmops.py
@@ -0,0 +1,1124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent as xenapi_agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VMOpsTestBase, self).setUp()
+ self._setup_mock_vmops()
+ self.vms = []
+
+ def _setup_mock_vmops(self, product_brand=None, product_version=None):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ self._session = xenapi_session.XenAPISession('test_url', 'root',
+ 'test_pass')
+ self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
+
+ def create_vm(self, name, state="Running"):
+ vm_ref = xenapi_fake.create_vm(name, state)
+ self.vms.append(vm_ref)
+ vm = xenapi_fake.get_record("VM", vm_ref)
+ return vm, vm_ref
+
+ def tearDown(self):
+ super(VMOpsTestBase, self).tearDown()
+ for vm in self.vms:
+ xenapi_fake.destroy_vm(vm)
+
+
+class VMOpsTestCase(VMOpsTestBase):
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self._setup_mock_vmops()
+
+ def _setup_mock_vmops(self, product_brand=None, product_version=None):
+ self._session = self._get_mock_session(product_brand, product_version)
+ self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
+
+ def _get_mock_session(self, product_brand, product_version):
+ class Mock(object):
+ pass
+
+ mock_session = Mock()
+ mock_session.product_brand = product_brand
+ mock_session.product_version = product_version
+ return mock_session
+
+ def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
+ vm_shutdown=True):
+ instance = {'name': 'foo',
+ 'task_state': task_states.RESIZE_MIGRATING}
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self._vmops, '_destroy')
+ self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
+ self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
+ self.mox.StubOutWithMock(self._vmops, '_start')
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+
+ vm_utils.lookup(self._session, 'foo-orig').AndReturn(
+ backup_made and 'foo' or None)
+ vm_utils.lookup(self._session, 'foo').AndReturn(
+ (not backup_made or new_made) and 'foo' or None)
+ if backup_made:
+ if new_made:
+ self._vmops._destroy(instance, 'foo')
+ vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
+ self._vmops._attach_mapped_block_devices(instance, [])
+
+ vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
+ if vm_shutdown:
+ self._vmops._start(instance, 'foo')
+
+ self.mox.ReplayAll()
+
+ self._vmops.finish_revert_migration(context, instance, [])
+
+ def test_finish_revert_migration_after_crash(self):
+ self._test_finish_revert_migration_after_crash(True, True)
+
+ def test_finish_revert_migration_after_crash_before_new(self):
+ self._test_finish_revert_migration_after_crash(True, False)
+
+ def test_finish_revert_migration_after_crash_before_backup(self):
+ self._test_finish_revert_migration_after_crash(False, False)
+
+ def test_xsm_sr_check_relaxed_cached(self):
+ self.make_plugin_call_count = 0
+
+ def fake_make_plugin_call(plugin, method, **args):
+ self.make_plugin_call_count = self.make_plugin_call_count + 1
+ return "true"
+
+ self.stubs.Set(self._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
+ self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
+
+ self.assertEqual(self.make_plugin_call_count, 1)
+
+ def test_get_vm_opaque_ref_raises_instance_not_found(self):
+ instance = {"name": "dummy"}
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InstanceNotFound,
+ self._vmops._get_vm_opaque_ref, instance)
+
+
+class InjectAutoDiskConfigTestCase(VMOpsTestBase):
+ def test_inject_auto_disk_config_when_present(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ xenstore_data = vm['xenstore_data']
+ self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
+
+ def test_inject_auto_disk_config_none_as_false(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ xenstore_data = vm['xenstore_data']
+ self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
+
+
+class GetConsoleOutputTestCase(VMOpsTestBase):
+ def test_get_console_output_works(self):
+ self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
+
+ instance = {"name": "dummy"}
+ self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
+ self.mox.ReplayAll()
+
+ self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
+
+ def test_get_console_output_throws_nova_exception(self):
+ self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
+
+ instance = {"name": "dummy"}
+        # dom_id=0 is used to trigger an exception in the fake XenAPI
+ self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NovaException,
+ self.vmops.get_console_output, instance)
+
+ def test_get_dom_id_works(self):
+ instance = {"name": "dummy"}
+ vm, vm_ref = self.create_vm("dummy")
+ self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
+
+ def test_get_dom_id_works_with_rescue_vm(self):
+ instance = {"name": "dummy"}
+ vm, vm_ref = self.create_vm("dummy-rescue")
+ self.assertEqual(vm["domid"],
+ self.vmops._get_dom_id(instance, check_rescue=True))
+
+ def test_get_dom_id_raises_not_found(self):
+ instance = {"name": "dummy"}
+ self.create_vm("not-dummy")
+ self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
+
+ def test_get_dom_id_works_with_vmref(self):
+ vm, vm_ref = self.create_vm("dummy")
+ self.assertEqual(vm["domid"],
+ self.vmops._get_dom_id(vm_ref=vm_ref))
+
+
+class SpawnTestCase(VMOpsTestBase):
+ def _stub_out_common(self):
+ self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
+ self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
+ self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
+ self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
+ self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
+ self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
+ self.mox.StubOutWithMock(self.vmops._volumeops,
+ 'safe_cleanup_from_vdis')
+ self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
+ self.mox.StubOutWithMock(vm_utils,
+ 'create_kernel_and_ramdisk')
+ self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
+ self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
+ self.mox.StubOutWithMock(self.vmops, '_destroy')
+ self.mox.StubOutWithMock(self.vmops, '_attach_disks')
+ self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
+ self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
+ self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
+ self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
+ self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
+ self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
+ self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
+ self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
+ self.mox.StubOutWithMock(self.vmops, '_create_vifs')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'setup_basic_filtering')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'prepare_instance_filter')
+ self.mox.StubOutWithMock(self.vmops, '_start')
+ self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
+ self.mox.StubOutWithMock(self.vmops,
+ '_configure_new_instance_with_agent')
+ self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
+ self.mox.StubOutWithMock(self.vmops.firewall_driver,
+ 'apply_instance_filter')
+
+ def _test_spawn(self, name_label_param=None, block_device_info_param=None,
+ rescue=False, include_root_vdi=True, throw_exception=None,
+ attach_pci_dev=False):
+ self._stub_out_common()
+
+ instance = {"name": "dummy", "uuid": "fake_uuid"}
+ name_label = name_label_param
+ if name_label is None:
+ name_label = "dummy"
+ image_meta = {"id": "image_id"}
+ context = "context"
+ session = self.vmops._session
+ injected_files = "fake_files"
+ admin_password = "password"
+ network_info = "net_info"
+ steps = 10
+ if rescue:
+ steps += 1
+
+ block_device_info = block_device_info_param
+ if block_device_info and not block_device_info['root_device_name']:
+ block_device_info = dict(block_device_info_param)
+ block_device_info['root_device_name'] = \
+ self.vmops.default_root_dev
+
+ di_type = "di_type"
+ vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
+ step = 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
+ if include_root_vdi:
+ vdis["root"] = {"ref": "fake_ref"}
+ self.vmops._get_vdis_for_instance(context, instance,
+ name_label, "image_id", di_type,
+ block_device_info).AndReturn(vdis)
+ self.vmops._resize_up_vdis(instance, vdis)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ kernel_file = "kernel"
+ ramdisk_file = "ramdisk"
+ vm_utils.create_kernel_and_ramdisk(context, session,
+ instance, name_label).AndReturn((kernel_file, ramdisk_file))
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ vm_ref = "fake_vm_ref"
+ self.vmops._ensure_instance_name_unique(name_label)
+ self.vmops._ensure_enough_free_mem(instance)
+ self.vmops._create_vm_record(context, instance, name_label,
+ di_type, kernel_file,
+ ramdisk_file, image_meta).AndReturn(vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
+ network_info, rescue, admin_password, injected_files)
+ if attach_pci_dev:
+ fake_dev = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': '00:00.0',
+ 'vendor_id': '1234',
+ 'product_id': 'abcd',
+ 'dev_type': 'type-PCI',
+ 'status': 'available',
+ 'dev_id': 'devid',
+ 'label': 'label',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ }
+ pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
+ vm_utils.set_other_config_pci(self.vmops._session,
+ vm_ref,
+ "0/0000:00:00.0")
+ else:
+ pci_manager.get_instance_pci_devs(instance).AndReturn([])
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._inject_instance_metadata(instance, vm_ref)
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ self.vmops._inject_hostname(instance, vm_ref, rescue)
+ self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
+ network_info)
+ self.vmops.inject_network_info(instance, network_info, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._create_vifs(instance, vm_ref, network_info)
+ self.vmops.firewall_driver.setup_basic_filtering(instance,
+ network_info).AndRaise(NotImplementedError)
+ self.vmops.firewall_driver.prepare_instance_filter(instance,
+ network_info)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ if rescue:
+ self.vmops._attach_orig_disks(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step,
+ steps)
+ self.vmops._start(instance, vm_ref)
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops._configure_new_instance_with_agent(instance, vm_ref,
+ injected_files, admin_password)
+ self.vmops._remove_hostname(instance, vm_ref)
+ step += 1
+ self.vmops._update_instance_progress(context, instance, step, steps)
+
+ self.vmops.firewall_driver.apply_instance_filter(instance,
+ network_info)
+ step += 1
+ last_call = self.vmops._update_instance_progress(context, instance,
+ step, steps)
+ if throw_exception:
+ last_call.AndRaise(throw_exception)
+ self.vmops._destroy(instance, vm_ref, network_info=network_info)
+ vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
+ kernel_file, ramdisk_file)
+ vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
+ self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
+
+ self.mox.ReplayAll()
+ self.vmops.spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info,
+ block_device_info_param, name_label_param, rescue)
+
+ def test_spawn(self):
+ self._test_spawn()
+
+ def test_spawn_with_alternate_options(self):
+ self._test_spawn(include_root_vdi=False, rescue=True,
+ name_label_param="bob",
+ block_device_info_param={"root_device_name": ""})
+
+ def test_spawn_with_pci_available_on_the_host(self):
+ self._test_spawn(attach_pci_dev=True)
+
+ def test_spawn_performs_rollback_and_throws_exception(self):
+ self.assertRaises(test.TestingException, self._test_spawn,
+ throw_exception=test.TestingException())
+
+ def _test_finish_migration(self, power_on=True, resize_instance=True,
+ throw_exception=None):
+ self._stub_out_common()
+ self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
+ self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
+
+ context = "context"
+ migration = {}
+ name_label = "dummy"
+ instance = {"name": name_label, "uuid": "fake_uuid"}
+ disk_info = "disk_info"
+ network_info = "net_info"
+ image_meta = {"id": "image_id"}
+ block_device_info = "bdi"
+ session = self.vmops._session
+
+ self.vmops._ensure_instance_name_unique(name_label)
+ self.vmops._ensure_enough_free_mem(instance)
+
+ di_type = "di_type"
+ vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
+
+ root_vdi = {"ref": "fake_ref"}
+ ephemeral_vdi = {"ref": "fake_ref_e"}
+ vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
+ vm_utils.import_all_migrated_disks(self.vmops._session,
+ instance).AndReturn(vdis)
+
+ kernel_file = "kernel"
+ ramdisk_file = "ramdisk"
+ vm_utils.create_kernel_and_ramdisk(context, session,
+ instance, name_label).AndReturn((kernel_file, ramdisk_file))
+
+ vm_ref = "fake_vm_ref"
+ self.vmops._create_vm_record(context, instance, name_label,
+ di_type, kernel_file,
+ ramdisk_file, image_meta).AndReturn(vm_ref)
+
+ if resize_instance:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
+ network_info, False, None, None)
+ self.vmops._attach_mapped_block_devices(instance, block_device_info)
+ pci_manager.get_instance_pci_devs(instance).AndReturn([])
+
+ self.vmops._inject_instance_metadata(instance, vm_ref)
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
+ network_info)
+ self.vmops.inject_network_info(instance, network_info, vm_ref)
+
+ self.vmops._create_vifs(instance, vm_ref, network_info)
+ self.vmops.firewall_driver.setup_basic_filtering(instance,
+ network_info).AndRaise(NotImplementedError)
+ self.vmops.firewall_driver.prepare_instance_filter(instance,
+ network_info)
+
+ if power_on:
+ self.vmops._start(instance, vm_ref)
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ self.vmops.firewall_driver.apply_instance_filter(instance,
+ network_info)
+
+ last_call = self.vmops._update_instance_progress(context, instance,
+ step=5, total_steps=5)
+ if throw_exception:
+ last_call.AndRaise(throw_exception)
+ self.vmops._destroy(instance, vm_ref, network_info=network_info)
+ vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
+ kernel_file, ramdisk_file)
+ vm_utils.safe_destroy_vdis(self.vmops._session,
+ ["fake_ref_e", "fake_ref"])
+
+ self.mox.ReplayAll()
+ self.vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance,
+ block_device_info, power_on)
+
+ def test_finish_migration(self):
+ self._test_finish_migration()
+
+ def test_finish_migration_no_power_on(self):
+ self._test_finish_migration(power_on=False, resize_instance=False)
+
+ def test_finish_migrate_performs_rollback_on_error(self):
+ self.assertRaises(test.TestingException, self._test_finish_migration,
+ power_on=False, resize_instance=False,
+ throw_exception=test.TestingException())
+
+ def test_remove_hostname(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ self.mox.StubOutWithMock(self._session, 'call_xenapi')
+ self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
+ "vm-data/hostname")
+
+ self.mox.ReplayAll()
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.VerifyAll()
+
+ def test_reset_network(self):
+ class mock_agent(object):
+ def __init__(self):
+ self.called = False
+
+ def resetnetwork(self):
+ self.called = True
+
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ agent = mock_agent()
+
+ self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
+ self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
+
+ self.vmops.agent_enabled(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ self.vmops._inject_hostname(instance, vm_ref, False)
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.ReplayAll()
+ self.vmops.reset_network(instance)
+ self.assertTrue(agent.called)
+ self.mox.VerifyAll()
+
+ def test_inject_hostname(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=False)
+
+ def test_inject_hostname_with_rescue_prefix(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_inject_hostname_with_windows_name_truncation(self):
+ instance = {"hostname": "dummydummydummydummydummy",
+ "os_type": "windows", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummydum')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_wait_for_instance_to_start(self):
+ instance = {"uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(vm_utils, 'get_power_state')
+ self.mox.StubOutWithMock(greenthread, 'sleep')
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.SHUTDOWN)
+ greenthread.sleep(0.5)
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.RUNNING)
+
+ self.mox.ReplayAll()
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ def test_attach_orig_disks(self):
+ instance = {"name": "dummy"}
+ vm_ref = "vm_ref"
+        vdi_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+
+ vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
+ self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
+            vdi_refs)
+ vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
+ vmops.DEVICE_RESCUE, bootable=False)
+
+ self.mox.ReplayAll()
+ self.vmops._attach_orig_disks(instance, vm_ref)
+
+ def test_agent_update_setup(self):
+ # agent updates need to occur after networking is configured
+ instance = {'name': 'betelgeuse',
+ 'uuid': '1-2-3-4-5-6'}
+ vm_ref = 'vm_ref'
+ agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
+ self.vmops._virtapi, instance, vm_ref)
+
+ self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(agent, 'get_version')
+ self.mox.StubOutWithMock(agent, 'resetnetwork')
+ self.mox.StubOutWithMock(agent, 'update_if_needed')
+
+ xenapi_agent.should_use_agent(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ agent.get_version().AndReturn('1.2.3')
+ agent.resetnetwork()
+ agent.update_if_needed('1.2.3')
+
+ self.mox.ReplayAll()
+ self.vmops._configure_new_instance_with_agent(instance, vm_ref,
+ None, None)
+
+
+class DestroyTestCase(VMOpsTestBase):
+ def setUp(self):
+ super(DestroyTestCase, self).setUp()
+ self.context = context.RequestContext(user_id=None, project_id=None)
+ self.instance = fake_instance.fake_instance_obj(self.context)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
+ lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': []})
+ self.assertEqual(0, find_sr_by_uuid.call_count)
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
+@mock.patch.object(vm_utils, 'get_sr_path')
+@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
+class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
+ def test_migrate_disk_and_power_off_works_down(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
+ flavor = {"root_gb": 1, "ephemeral_gb": 0}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_up.called)
+ self.assertTrue(migrate_down.called)
+
+ def test_migrate_disk_and_power_off_works_up(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
+ flavor = {"root_gb": 2, "ephemeral_gb": 2}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_down.called)
+ self.assertTrue(migrate_up.called)
+
+ def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"ephemeral_gb": 2}
+ flavor = {"ephemeral_gb": 1}
+
+ self.assertRaises(exception.ResizeError,
+ self.vmops.migrate_disk_and_power_off,
+ None, instance, None, flavor, None)
+
+
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+class MigrateDiskResizingUpTestCase(VMOpsTestBase):
+ def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
+ userdevice, post_snapshot_callback):
+ self.assertIsInstance(instance, dict)
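+        # The root disk (userdevice '0') yields a three-deep fake chain,
+        # while ephemeral disks yield shorter chains derived from their
+        # userdevice.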
+ if userdevice == '0':
+ self.assertEqual("vm_ref", vm_ref)
+ self.assertEqual("fake-snapshot", label)
+ yield ["leaf", "parent", "grandp"]
+ else:
+ leaf = userdevice + "-leaf"
+ parent = userdevice + "-parent"
+ yield [leaf, parent]
+
+ def test_migrate_disk_resizing_up_works_no_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = None
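+        # With no ephemeral VDIs only the root chain is migrated, parents
+        # first and the leaf last.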
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
+ m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
+ dest, sr_path, 1),
+ mock.call(self.vmops._session, instance, "grandp",
+ dest, sr_path, 2),
+ mock.call(self.vmops._session, instance, "leaf",
+ dest, sr_path, 0)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
+ m_vhd_expected = [mock.call(self.vmops._session, instance,
+ "parent", dest, sr_path, 1),
+ mock.call(self.vmops._session, instance,
+ "grandp", dest, sr_path, 2),
+ mock.call(self.vmops._session, instance,
+ "4-parent", dest, sr_path, 1, 1),
+ mock.call(self.vmops._session, instance,
+ "5-parent", dest, sr_path, 1, 2),
+ mock.call(self.vmops._session, instance,
+ "leaf", dest, sr_path, 0),
+ mock.call(self.vmops._session, instance,
+ "4-leaf", dest, sr_path, 0, 1),
+ mock.call(self.vmops._session, instance,
+ "5-leaf", dest, sr_path, 0, 2)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
+ def test_migrate_disk_resizing_up_rollback(self,
+ mock_restore,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "fake"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_migrate_vhd.side_effect = test.TestingException
+ mock_restore.side_effect = test.TestingException
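+        # Even when the rollback itself fails, the original failure should
+        # still surface as InstanceFaultRollback.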
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.vmops._migrate_disk_resizing_up,
+ context, instance, dest, vm_ref, sr_path)
+
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_restore.assert_called_once_with(instance)
+ mock_migrate_vhd.assert_called_once_with(self.vmops._session,
+ instance, "parent", dest, sr_path, 1)
+
+
+class CreateVMRecordTestCase(VMOpsTestBase):
+ @mock.patch.object(vm_utils, 'determine_vm_mode')
+ @mock.patch.object(vm_utils, 'get_vm_device_id')
+ @mock.patch.object(vm_utils, 'create_vm')
+ def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
+ mock_get_vm_device_id, mock_determine_vm_mode):
+
+ context = "context"
+ instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
+ name_label = "dummy"
+ disk_image_type = "vhd"
+ kernel_file = "kernel"
+ ramdisk_file = "ram"
+ device_id = "0002"
+ image_properties = {"xenapi_device_id": device_id}
+ image_meta = {"properties": image_properties}
+ session = "session"
+ self.vmops._session = session
+ mock_get_vm_device_id.return_value = device_id
+ mock_determine_vm_mode.return_value = "vm_mode"
+
+ self.vmops._create_vm_record(context, instance, name_label,
+ disk_image_type, kernel_file, ramdisk_file, image_meta)
+
+ mock_get_vm_device_id.assert_called_with(session, image_properties)
+ mock_create_vm.assert_called_with(session, instance, name_label,
+ kernel_file, ramdisk_file, False, device_id)
+
+
+class BootableTestCase(VMOpsTestBase):
+
+ def setUp(self):
+ super(BootableTestCase, self).setUp()
+
+ self.instance = {"name": "test", "uuid": "fake"}
+ vm_rec, self.vm_ref = self.create_vm('test')
+
+ # sanity check bootlock is initially disabled:
+ self.assertEqual({}, vm_rec['blocked_operations'])
+
+ def _get_blocked(self):
+ vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
+ return vm_rec['blocked_operations']
+
+ def test_acquire_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+ def test_release_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ self.vmops._release_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_bootable(self):
+ self.vmops.set_bootable(self.instance, True)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_not_bootable(self):
+ self.vmops.set_bootable(self.instance, False)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+
+@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
+class ResizeVdisTestCase(VMOpsTestBase):
+ def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertTrue(mock_resize.called)
+
+ def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': True}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
+ 'ephemerals': ephemerals}
+ with mock.patch.object(vm_utils, 'generate_single_ephemeral',
+ autospec=True) as g:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertEqual([mock.call(self.vmops._session, instance, 4,
+ 2000),
+ mock.call(self.vmops._session, instance, 5,
+ 1000)],
+ mock_resize.call_args_list)
+ self.assertFalse(g.called)
+
+ def test_resize_up_vdis_root(self, mock_resize):
+ instance = {"root_gb": 20, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ "vdi_ref", 20)
+
+ def test_resize_up_vdis_zero_disks(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {}})
+ self.assertFalse(mock_resize.called)
+
+ def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ vdis = {}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ expected = [mock.call(self.vmops._session, instance, 4, 2000),
+ mock.call(self.vmops._session, instance, 5, 1000)]
+ self.assertEqual(expected, mock_resize.call_args_list)
+
+ @mock.patch.object(vm_utils, 'generate_single_ephemeral')
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
+ mock_generate,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
+ ephemerals = {"4": {"ref": 4}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ 4, 2000)
+ mock_generate.assert_called_once_with(self.vmops._session, instance,
+ None, 5, 1000)
+
+
+@mock.patch.object(vm_utils, 'remove_old_snapshots')
+class CleanupFailedSnapshotTestCase(VMOpsTestBase):
+ def test_post_interrupted_snapshot_cleanup(self, mock_remove):
+ self.vmops._get_vm_opaque_ref = mock.Mock()
+ self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
+
+ self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
+
+ mock_remove.assert_called_once_with(self.vmops._session,
+ "instance", "vm_ref")
+
+
+class LiveMigrateHelperTestCase(VMOpsTestBase):
+ def test_connect_block_device_volumes_none(self):
+ self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume")
+ def test_connect_block_device_volumes_calls_connect(self, mock_connect):
+ with mock.patch.object(self.vmops._session,
+ "call_xenapi") as mock_session:
+ mock_connect.return_value = ("sr_uuid", None)
+ mock_session.return_value = "sr_ref"
+ bdm = {"connection_info": "c_info"}
+ bdi = {"block_device_mapping": [bdm]}
+ result = self.vmops.connect_block_device_volumes(bdi)
+
+ self.assertEqual({'sr_uuid': 'sr_ref'}, result)
+
+ mock_connect.assert_called_once_with("c_info")
+ mock_session.assert_called_once_with("SR.get_by_uuid",
+ "sr_uuid")
+
+
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+@mock.patch.object(vm_utils, 'resize_disk')
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vm_utils, 'destroy_vdi')
+class MigrateDiskResizingDownTestCase(VMOpsTestBase):
+ def test_migrate_disk_resizing_down_works_no_ephemeral(
+ self,
+ mock_destroy_vdi,
+ mock_migrate_vhd,
+ mock_resize_disk,
+ mock_get_vdi_for_vm_safely,
+ mock_update_instance_progress,
+ mock_apply_orig_vm_name_label,
+ mock_resize_ensure_vm_is_shutdown):
+
+ context = "ctx"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+ instance_type = dict(root_gb=1)
+ old_vdi_ref = "old_ref"
+ new_vdi_ref = "new_ref"
+ new_vdi_uuid = "new_uuid"
+
+ mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
+ mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
+
+ self.vmops._migrate_disk_resizing_down(context, instance, dest,
+ instance_type, vm_ref, sr_path)
+
+ mock_get_vdi_for_vm_safely.assert_called_once_with(
+ self.vmops._session,
+ vm_ref)
+ mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
+ instance, vm_ref)
+ mock_apply_orig_vm_name_label.assert_called_once_with(
+ instance, vm_ref)
+ mock_resize_disk.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ old_vdi_ref,
+ instance_type)
+ mock_migrate_vhd.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ new_vdi_uuid,
+ dest,
+ sr_path, 0)
+ mock_destroy_vdi.assert_called_once_with(
+ self.vmops._session,
+ new_vdi_ref)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected,
+ mock_update_instance_progress.call_args_list)
+
+
+class GetVdisForInstanceTestCase(VMOpsTestBase):
+ """Tests get_vdis_for_instance utility method."""
+ def setUp(self):
+ super(GetVdisForInstanceTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = mock.Mock()
+ self.vmops._session = self.session
+ self.instance = fake_instance.fake_instance_obj(self.context)
+ self.name_label = 'name'
+ self.image = 'fake_image_id'
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume",
+ return_value=("sr", "vdi_uuid"))
+ def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
+ # setup fake data
+ data = {'name_label': self.name_label,
+ 'sr_uuid': 'fake',
+ 'auth_password': 'scrubme'}
+ bdm = [{'mount_device': '/dev/vda',
+ 'connection_info': {'data': data}}]
+ bdi = {'root_device_name': 'vda',
+ 'block_device_mapping': bdm}
+
+ # Tests that the block device mapping data passed to
+ # _get_vdis_for_instance is sanitized for passwords when it is logged.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.assertNotIn('scrubme', args[0])
+ fake_debug.matched = True
+
+ fake_debug.matched = False
+
+ with mock.patch.object(vmops.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ vdis = self.vmops._get_vdis_for_instance(self.context,
+ self.instance, self.name_label, self.image,
+ image_type=4, block_device_info=bdi)
+ self.assertEqual(1, len(vdis))
+ get_uuid_mock.assert_called_once_with({"data": data})
+ # We don't care what the log message is; we just want to make sure
+ # our stub method is called, which asserts the password is scrubbed.
+ self.assertTrue(debug_mock.called)
+ self.assertTrue(fake_debug.matched)
diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py
new file mode 100644
index 0000000000..59fd4626b9
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volume_utils.py
@@ -0,0 +1,232 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import volume_utils
+
+
+class SROps(stubs.XenAPITestBaseNoDB):
+ def test_find_sr_valid_uuid(self):
+ self.session = mock.Mock()
+ self.session.call_xenapi.return_value = 'sr_ref'
+ self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
+ 'sr_uuid'),
+ 'sr_ref')
+
+ def test_find_sr_invalid_uuid(self):
+ class UUIDException(Exception):
+ details = ["UUID_INVALID", "", "", ""]
+
+ self.session = mock.Mock()
+ self.session.XenAPI.Failure = UUIDException
+ self.session.call_xenapi.side_effect = UUIDException
+ self.assertIsNone(
+ volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
+
+ def test_find_sr_from_vdi(self):
+ vdi_ref = 'fake-ref'
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
+ vdi_ref)
+
+ def test_find_sr_from_vdi_exception(self):
+ vdi_ref = 'fake-ref'
+
+ class FakeException(Exception):
+ pass
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+ self.assertRaises(exception.StorageError,
+ volume_utils.find_sr_from_vdi, session, vdi_ref)
+
+
+class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
+ def test_target_host(self):
+ self.assertEqual(volume_utils._get_target_host('host:port'),
+ 'host')
+
+ self.assertEqual(volume_utils._get_target_host('host'),
+ 'host')
+
+ # There is no default value
+ self.assertIsNone(volume_utils._get_target_host(':port'))
+
+ self.assertIsNone(volume_utils._get_target_host(None))
+
+ def test_target_port(self):
+ self.assertEqual(volume_utils._get_target_port('host:port'),
+ 'port')
+
+ self.assertEqual(volume_utils._get_target_port('host'),
+ '3260')
+
+
+class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
+ def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
+ fake_get_vdi_ref.call_count += 1
+ if fake_get_vdi_ref.call_count == 2:
+ return 'vdi_ref'
+
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+
+ mock_get_vdi_ref.side_effect = fake_get_vdi_ref
+ fake_get_vdi_ref.call_count = 0
+
+ self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
+ 'vdi_ref')
+ mock_sleep.assert_called_once_with(20)
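+ # (The single 20-second sleep reflects the scenario built by
+ # fake_get_vdi_ref: the first lookup finds nothing, introduce_vdi
+ # waits once, and the second attempt succeeds.)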
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ mock_get_vdi_ref.return_value = None
+
+ self.assertRaises(exception.StorageError,
+ volume_utils.introduce_vdi, session, 'sr_ref')
+ mock_sleep.assert_called_once_with(20)
+
+
+class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
+ def test_mountpoint_to_number(self):
+ cases = {
+ 'sda': 0,
+ 'sdp': 15,
+ 'hda': 0,
+ 'hdp': 15,
+ 'vda': 0,
+ 'xvda': 0,
+ '0': 0,
+ '10': 10,
+ 'vdq': -1,
+ 'sdq': -1,
+ 'hdq': -1,
+ 'xvdq': -1,
+ }
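+ # As the expectations above show: a trailing letter in the range a-p
+ # maps to device numbers 0-15, plain integer strings pass through
+ # unchanged, and the 'q' devices fall back to -1.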
+
+ for (input, expected) in cases.iteritems():
+ actual = volume_utils._mountpoint_to_number(input)
+ self.assertEqual(actual, expected,
+ '%s yielded %s, not %s' % (input, actual, expected))
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_parse_volume_info_parsing_auth_details(self):
+ conn_info = self._make_connection_info()
+ result = volume_utils._parse_volume_info(conn_info['data'])
+
+ self.assertEqual('username', result['chapuser'])
+ self.assertEqual('password', result['chappassword'])
+
+ def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
+ self.assertRaises(
+ exception.StorageError,
+ volume_utils.get_device_number,
+ 'dev/sd')
+
+
+class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_vbd_by_number_works(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "1"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertEqual("a", result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
+
+ def test_find_vbd_by_number_no_matches(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "3"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ expected = [mock.call("a"), mock.call("b")]
+ self.assertEqual(expected,
+ session.VBD.get_userdevice.call_args_list)
+
+ def test_find_vbd_by_number_no_vbds(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = []
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ self.assertFalse(session.VBD.get_userdevice.called)
+
+ def test_find_vbd_by_number_ignores_exception(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.get_VBDs.return_value = ["a"]
+ session.VBD.get_userdevice.side_effect = test.TestingException
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
diff --git a/nova/tests/unit/virt/xenapi/test_volumeops.py b/nova/tests/unit/virt/xenapi/test_volumeops.py
new file mode 100644
index 0000000000..0e840bb209
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volumeops.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VolumeOpsTestBase, self).setUp()
+ self._setup_mock_volumeops()
+
+ def _setup_mock_volumeops(self):
+ self.session = stubs.FakeSessionForVolumeTests('fake_uri')
+ self.ops = volumeops.VolumeOps(self.session)
+
+
+class VolumeDetachTestCase(VolumeOpsTestBase):
+ def test_detach_volume_call(self):
+ registered_calls = []
+
+ def regcall(label):
+ def side_effect(*args, **kwargs):
+ registered_calls.append(label)
+ return side_effect
+
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.volume_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
+ regcall('destroy_vbd'))
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').WithSideEffects(
+ regcall('find_sr_from_vbd')).AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
+ self.assertEqual(
+ ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = "vbd_ref"
+
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ mock_vm.assert_called_once_with(self.session, "name")
+ mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
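+ # (device number 3 comes from the real volume_utils.get_device_number,
+ # which is not mocked here and maps '/dev/xvdd' to userdevice 3)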
+ mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
+ mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = None
+
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume_raises(self, mock_vm, mock_vbd,
+ mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops.detach_volume, {}, "name", "/dev/xvdd")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = False
+ mock_find_sr.return_value = "sr_ref"
+
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
+
+ mock_shutdown.assert_called_once_with(self.session, "vm_ref")
+ mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
+ mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
+ mock_destroy.assert_called_once_with(self.session, "vbd_ref")
+ mock_purge.assert_called_once_with(self.session, "sr_ref")
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = True
+ mock_find_sr.return_value = "sr_ref"
+
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
+
+ expected = [mock.call(self.session, "vbd_ref_1"),
+ mock.call(self.session, "vbd_ref_2")]
+ self.assertEqual(expected, mock_destroy.call_args_list)
+ mock_purge.assert_called_with(self.session, "sr_ref")
+ self.assertFalse(mock_unplug.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = []
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = ["1"]
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_detach.assert_called_once_with("vm_ref", ["1"])
+
+ def test_get_all_volume_vbd_refs_no_vbds(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = []
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_conf.called)
+
+ def test_get_all_volume_vbd_refs_no_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1"]
+ mock_conf.return_value = {}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ mock_conf.assert_called_once_with("1")
+
+ def test_get_all_volume_vbd_refs_with_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1", "2"]
+ mock_conf.return_value = {"osvol": True}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual(["1", "2"], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+
+
+class AttachVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda")
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ True)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ False)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
+ self.ops.connect_volume({})
+ mock_attach.assert_called_once_with({})
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info)
+
+ self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ self.assertFalse(mock_attach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.return_value = "vdi_ref"
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info, "vm_ref",
+ "name", 2, True)
+
+ self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, "name")
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
+ True)
+
+ @mock.patch.object(volume_utils, "forget_sr")
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver, mock_forget):
+ connection_info = {"data": {}}
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops._attach_volume, connection_info)
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_forget.assert_called_once_with(self.session, "sr_ref")
+ self.assertFalse(mock_attach.called)
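+ # i.e. when connecting the hypervisor to the volume fails, the SR
+ # that was just introduced is forgotten again rather than leaked.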
+
+ def test_check_is_supported_driver_type_pass_iscsi(self):
+ conn_info = {"driver_volume_type": "iscsi"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+ def test_check_is_supported_driver_type_pass_xensm(self):
+ conn_info = {"driver_volume_type": "xensm"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+ def test_check_is_supported_driver_type_pass_bad(self):
+ conn_info = {"driver_volume_type": "bad"}
+ self.assertRaises(exception.VolumeDriverNotFound,
+ self.ops._check_is_supported_driver_type, conn_info)
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = None
+ mock_introduce_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ mock_introduce_sr.assert_called_once_with(self.session, "uuid",
+ "label", "params")
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ self.assertFalse(mock_introduce_sr.called)
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_regular(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ result = self.ops._connect_hypervisor_to_volume("sr", {})
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"vdi_uuid": "id"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ vdi_uuid="id")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_lun(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"target_lun": "lun"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ target_lun="lun")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = False
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ mock_plug.assert_called_once_with("vbd", "vm")
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = True
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ self.assertFalse(mock_shutdown.called)
+
+
+class FindBadVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_vbds(self, mock_get_all):
+ mock_get_all.return_value = []
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["1", "2"]
+ mock_find_sr.return_value = "sr_ref"
+
+ with mock.patch.object(self.session.SR, "scan") as mock_scan:
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ expected_find = [mock.call(self.session, "1"),
+ mock.call(self.session, "2")]
+ self.assertEqual(expected_find, mock_find_sr.call_args_list)
+ expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
+ self.assertEqual(expected_scan, mock_scan.call_args_list)
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+ mock_get.assert_called_once_with("vbd_ref")
+ self.assertEqual(["/dev/xvdb"], result)
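+ # An SR.scan failure carrying the SR_BACKEND_FAILURE_40 detail marks
+ # the volume as bad and reports its device path; any other failure
+ # detail is re-raised, as the next test shows.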
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
+ class FakeException(Exception):
+ details = ['foo', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ self.assertRaises(FakeException,
+ self.ops.find_bad_volumes, "vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+
+
+class CleanupFromVDIsTestCase(VolumeOpsTestBase):
+ def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs):
+ find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
+ in vdi_refs]
+ find_sr_from_vdi.assert_has_calls(find_sr_calls)
+ purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
+ in sr_refs]
+ purge_sr.assert_has_calls(purge_sr_calls)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi',
+ side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref2']
+ find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
+ sr_refs[0]]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ purge_sr.side_effect = [test.TestingException, None]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
new file mode 100644
index 0000000000..c90f8c2f63
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -0,0 +1,4105 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for XenAPI."""
+
+import ast
+import base64
+import contextlib
+import copy
+import functools
+import os
+import re
+
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import hvtype
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_processutils
+import nova.tests.unit.image.fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_aggregate
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import host
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import pool
+from nova.virt.xenapi import pool_states
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
+ group="xenserver")
+
+IMAGE_MACHINE = '1'
+IMAGE_KERNEL = '2'
+IMAGE_RAMDISK = '3'
+IMAGE_RAW = '4'
+IMAGE_VHD = '5'
+IMAGE_ISO = '6'
+IMAGE_IPXE_ISO = '7'
+IMAGE_FROM_VOLUME = '8'
+
+IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'disk_format': 'ami',
+ 'container_format': 'ami'},
+ },
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'disk_format': 'aki',
+ 'container_format': 'aki'},
+ },
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'disk_format': 'ari',
+ 'container_format': 'ari'},
+ },
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'disk_format': 'raw',
+ 'container_format': 'bare'},
+ },
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'disk_format': 'vhd',
+ 'container_format': 'ovf'},
+ },
+ IMAGE_ISO: {
+ 'image_meta': {'name': 'fakeiso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare'},
+ },
+ IMAGE_IPXE_ISO: {
+ 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare',
+ 'properties': {'ipxe_boot': 'true'}},
+ },
+ IMAGE_FROM_VOLUME: {
+ 'image_meta': {'name': 'fake_ipxe_iso',
+ 'properties': {'foo': 'bar'}},
+ },
+}
+
+
+def get_session():
+ return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
+
+
+def set_image_fixtures():
+ image_service = fake_image.FakeImageService()
+ image_service.images.clear()
+ for image_id, image_meta in IMAGE_FIXTURES.items():
+ image_meta = image_meta['image_meta']
+ image_meta['id'] = image_id
+ image_service.create(None, image_meta)
+
+
+def get_fake_device_info():
+ # FIXME: 'sr_uuid', 'introduce_sr_keys', 'sr_type' and 'vdi_uuid'
+ # can be removed from the dict when LP bug #1087308 is fixed.
+ fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+ fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+ fake = {'block_device_mapping':
+ [{'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'sr_uuid': 'falseSR',
+ 'introduce_sr_keys': ['sr_type'],
+ 'sr_type': 'iscsi',
+ 'vdi_uuid': fake_vdi_uuid,
+ 'target_discovered': False,
+ 'target_iqn': 'foo_iqn:foo_volid',
+ 'target_portal': 'localhost:3260',
+ 'volume_id': 'foo_volid',
+ 'target_lun': 1,
+ 'auth_password': 'my-p@55w0rd',
+ 'auth_username': 'johndoe',
+ 'auth_method': u'CHAP'}, },
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}, ],
+ 'root_device_name': '/dev/sda',
+ 'ephemerals': [],
+ 'swap': None, }
+ return fake
+
+
+def stub_vm_utils_with_vdi_attached_here(function):
+ """vm_utils.vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a VDI. This decorator
+ stubs it out (along with image downloads) for the wrapped test.
+ """
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ @contextlib.contextmanager
+ def fake_vdi_attached_here(*args, **kwargs):
+ fake_dev = 'fakedev'
+ yield fake_dev
+
+ def fake_image_download(*args, **kwargs):
+ pass
+
+ orig_vdi_attached_here = vm_utils.vdi_attached_here
+ orig_image_download = fake_image._FakeImageService.download
+ try:
+ vm_utils.vdi_attached_here = fake_vdi_attached_here
+ fake_image._FakeImageService.download = fake_image_download
+ return function(self, *args, **kwargs)
+ finally:
+ fake_image._FakeImageService.download = orig_image_download
+ vm_utils.vdi_attached_here = orig_vdi_attached_here
+
+ return decorated_function
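+# Illustrative usage sketch for the decorator above (hypothetical test
+# name, not part of this suite):
+#
+#     @stub_vm_utils_with_vdi_attached_here
+#     def test_spawn_touches_a_vdi(self):
+#         ...  # vm_utils.vdi_attached_here now yields 'fakedev' and
+#              # image downloads are no-ops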
+
+
+def get_create_system_metadata(context, instance_type_id):
+ flavor = db.flavor_get(context, instance_type_id)
+ return flavors.save_flavor_info({}, flavor)
+
+
+def create_instance_with_system_metadata(context, instance_values):
+ instance_values['system_metadata'] = get_create_system_metadata(
+ context, instance_values['instance_type_id'])
+ instance_values['pci_devices'] = []
+ return db.instance_create(context, instance_values)
+
+
+class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Volume operations."""
+ def setUp(self):
+ super(XenAPIVolumeTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.instance = fake_instance.fake_db_instance(name='foo')
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_attach_volume(self):
+ # This shows how to test Ops classes' methods.
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
+ conn_info = self._make_connection_info()
+ self.assertIsNone(
+ conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
+
+ # check that the VM has a VBD attached to it
+ # Get XenAPI record for VBD
+ vbds = xenapi_fake.get_all('VBD')
+ vbd = xenapi_fake.get_record('VBD', vbds[0])
+ vm_ref = vbd['VM']
+ self.assertEqual(vm_ref, vm)
+
+ def test_attach_volume_raise_exception(self):
+ # This shows how to test when exceptions are raised.
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForVolumeFailedTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(self.instance['name'], 'Running')
+ self.assertRaises(exception.VolumeDriverNotFound,
+ conn.attach_volume,
+ None, {'driver_volume_type': 'nonexist'},
+ self.instance, '/dev/sdc')
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIVMTestCase(stubs.XenAPITestBase):
+ """Unit tests for VM operations."""
+ def setUp(self):
+ super(XenAPIVMTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ self.network = importutils.import_object(CONF.network_manager)
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', 'fake_br1')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ stubs.stub_out_vm_methods(self.stubs)
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.conn._session.is_local_connection = False
+
+ fake_image.stub_out_image_service(self.stubs)
+ set_image_fixtures()
+ stubs.stubout_image_service_download(self.stubs)
+ stubs.stubout_stream_disk(self.stubs)
+
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+ name_label = "fakenamelabel"
+ disk_type = "fakedisktype"
+ virtual_size = 777
+ return vm_utils.create_vdi(
+ session, sr_ref, instance, name_label, disk_type,
+ virtual_size)
+ self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ super(XenAPIVMTestCase, self).tearDown()
+
+ def test_init_host(self):
+ session = get_session()
+ vm = vm_utils._get_this_vm_ref(session)
+ # Local root disk
+ vdi0 = xenapi_fake.create_vdi('compute', None)
+ vbd0 = xenapi_fake.create_vbd(vm, vdi0)
+ # Instance VDI
+ vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
+ other_config={'nova_instance_uuid': 'aaaa'})
+ xenapi_fake.create_vbd(vm, vdi1)
+ # Only looks like instance VDI
+ vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
+ vbd2 = xenapi_fake.create_vbd(vm, vdi2)
+
+ self.conn.init_host(None)
+ self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
+
+ def test_instance_exists(self):
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.stubs.Set(objects.Instance, 'name', 'foo')
+ instance = objects.Instance(uuid='fake-uuid')
+ self.assertTrue(self.conn.instance_exists(instance))
+
+ def test_instance_not_exists(self):
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.stubs.Set(objects.Instance, 'name', 'bar')
+ instance = objects.Instance(uuid='fake-uuid')
+ self.assertFalse(self.conn.instance_exists(instance))
+
+ def test_list_instances_0(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(instances, [])
+
+ def test_list_instance_uuids_0(self):
+ instance_uuids = self.conn.list_instance_uuids()
+ self.assertEqual(instance_uuids, [])
+
+ def test_list_instance_uuids(self):
+ uuids = []
+ for x in xrange(1, 4):
+ instance = self._create_instance(x)
+ uuids.append(instance['uuid'])
+ instance_uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), len(instance_uuids))
+ self.assertEqual(set(uuids), set(instance_uuids))
+
+ def test_get_rrd_server(self):
+ self.flags(connection_url='myscheme://myaddress/',
+ group='xenserver')
+ server_info = vm_utils._get_rrd_server()
+ self.assertEqual(server_info[0], 'myscheme')
+ self.assertEqual(server_info[1], 'myaddress')
+
+ expected_raw_diagnostics = {
+ 'vbd_xvdb_write': '0.0',
+ 'memory_target': '4294967296.0000',
+ 'memory_internal_free': '1415564.0000',
+ 'memory': '4294967296.0000',
+ 'vbd_xvda_write': '0.0',
+ 'cpu0': '0.0042',
+ 'vif_0_tx': '287.4134',
+ 'vbd_xvda_read': '0.0',
+ 'vif_0_rx': '1816.0144',
+ 'vif_2_rx': '0.0',
+ 'vif_2_tx': '0.0',
+ 'vbd_xvdb_read': '0.0',
+ 'last_update': '1328795567',
+ }
+
+ def test_get_diagnostics(self):
+ def fake_get_rrd(host, vm_uuid):
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'vm_rrd.xml')) as f:
+ return re.sub(r'\s', '', f.read())
+ self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+ expected = self.expected_raw_diagnostics
+ instance = self._create_instance()
+ actual = self.conn.get_diagnostics(instance)
+ self.assertThat(actual, matchers.DictMatches(expected))
+
+ def test_get_instance_diagnostics(self):
+ def fake_get_rrd(host, vm_uuid):
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'vm_rrd.xml')) as f:
+ return re.sub(r'\s', '', f.read())
+ self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+ expected = {
+ 'config_drive': False,
+ 'state': 'running',
+ 'driver': 'xenapi',
+ 'version': '1.0',
+ 'uptime': 0,
+ 'hypervisor_os': None,
+ 'cpu_details': [{'time': 0}, {'time': 0},
+ {'time': 0}, {'time': 0}],
+ 'nic_details': [{'mac_address': '00:00:00:00:00:00',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 0,
+ 'rx_packets': 0,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 0,
+ 'tx_packets': 0}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 0,
+ 'read_requests': 0,
+ 'write_bytes': 0,
+ 'write_requests': 0}],
+ 'memory_details': {'maximum': 8192, 'used': 0}}
+
+ instance = self._create_instance()
+ actual = self.conn.get_instance_diagnostics(instance)
+ self.assertEqual(expected, actual.serialize())
+
+ def test_get_vnc_console(self):
+ instance = self._create_instance(obj=True)
+ session = get_session()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vm_ref = vm_utils.lookup(session, instance['name'])
+
+ console = conn.get_vnc_console(self.context, instance)
+
+ # NOTE(sulo): We don't care about the session id in this test;
+ # it will always differ, so strip it out.
+ actual_path = console.internal_access_path.split('&')[0]
+ expected_path = "/console?ref=%s" % str(vm_ref)
+
+ self.assertEqual(expected_path, actual_path)
+
+ def test_get_vnc_console_for_rescue(self):
+ instance = self._create_instance(obj=True)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
+ 'Running')
+ # Set instance state to rescued
+ instance['vm_state'] = 'rescued'
+
+ console = conn.get_vnc_console(self.context, instance)
+
+ # NOTE(sulo): We don't care about the session id in this test;
+ # it will always differ, so strip it out.
+ actual_path = console.internal_access_path.split('&')[0]
+ expected_path = "/console?ref=%s" % str(rescue_vm)
+
+ self.assertEqual(expected_path, actual_path)
+
+ def test_get_vnc_console_instance_not_ready(self):
+ instance = self._create_instance(obj=True, spawn=False)
+ instance.vm_state = 'building'
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.InstanceNotFound,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_get_vnc_console_rescue_not_ready(self):
+ instance = self._create_instance(obj=True, spawn=False)
+ instance.vm_state = 'rescued'
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.InstanceNotReady,
+ conn.get_vnc_console, self.context, instance)
+
+ def test_instance_snapshot_fails_with_no_primary_vdi(self):
+
+ def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=False,
+ osvol=False):
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'userdevice': 'fake',
+ 'currently_attached': False}
+ vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
+ xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
+ return vbd_ref
+
+ self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
+ stubs.stubout_instance_snapshot(self.stubs)
+ # Stub out the firewall driver because the stubs set up above alter
+ # XML-RPC result parsing.
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
+ instance = self._create_instance()
+
+ image_id = "my_snapshot_id"
+ self.assertRaises(exception.NovaException, self.conn.snapshot,
+ self.context, instance, image_id,
+ lambda *args, **kwargs: None)
+
+ def test_instance_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+ image_id = "my_snapshot_id"
+
+ stubs.stubout_instance_snapshot(self.stubs)
+ stubs.stubout_is_snapshot(self.stubs)
+ # Stub out the firewall driver because the stubs set up above alter
+ # XML-RPC result parsing.
+ stubs.stubout_firewall_driver(self.stubs, self.conn)
+
+ instance = self._create_instance()
+
+ self.fake_upload_called = False
+
+ def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
+ self.fake_upload_called = True
+ self.assertEqual(ctx, self.context)
+ self.assertEqual(inst, instance)
+ self.assertIsInstance(vdi_uuids, list)
+ self.assertEqual(img_id, image_id)
+
+ self.stubs.Set(glance.GlanceStore, 'upload_image',
+ fake_image_upload)
+
+ self.conn.snapshot(self.context, instance, image_id,
+ func_call_matcher.call)
+
+ # Ensure VM was torn down
+ vm_labels = []
+ for vm_ref in xenapi_fake.get_all('VM'):
+ vm_rec = xenapi_fake.get_record('VM', vm_ref)
+ if not vm_rec["is_control_domain"]:
+ vm_labels.append(vm_rec["name_label"])
+
+ self.assertEqual(vm_labels, [instance['name']])
+
+ # Ensure VBDs were torn down
+ vbd_labels = []
+ for vbd_ref in xenapi_fake.get_all('VBD'):
+ vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
+ vbd_labels.append(vbd_rec["vm_name_label"])
+
+ self.assertEqual(vbd_labels, [instance['name']])
+
+ # Ensure task states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ # Ensure VDIs were torn down
+ for vdi_ref in xenapi_fake.get_all('VDI'):
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ name_label = vdi_rec["name_label"]
+ self.assertFalse(name_label.endswith('snapshot'))
+
+ self.assertTrue(self.fake_upload_called)
+
+ def create_vm_record(self, conn, os_type, name):
+ instances = conn.list_instances()
+ self.assertEqual(instances, [name])
+
+ # Get Nova record for VM
+ vm_info = conn.get_info({'name': name})
+ # Get XenAPI record for VM
+ vms = [rec for ref, rec
+ in xenapi_fake.get_all_records('VM').iteritems()
+ if not rec['is_control_domain']]
+ vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+
+ def check_vm_record(self, conn, instance_type_id, check_injection):
+ flavor = db.flavor_get(conn, instance_type_id)
+ mem_kib = long(flavor['memory_mb']) << 10
+ mem_bytes = str(mem_kib << 10)
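+ # e.g. a 2048 MB flavor gives mem_kib = 2097152 and
+ # mem_bytes = '2147483648'; the VM record stores memory limits as
+ # byte-count strings, hence the string comparisons below.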
+ vcpus = flavor['vcpus']
+ vcpu_weight = flavor['vcpu_weight']
+
+ self.assertEqual(self.vm_info['max_mem'], mem_kib)
+ self.assertEqual(self.vm_info['mem'], mem_kib)
+ self.assertEqual(self.vm['memory_static_max'], mem_bytes)
+ self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
+ if vcpu_weight is None:
+ self.assertEqual(self.vm['VCPUs_params'], {})
+ else:
+ self.assertEqual(self.vm['VCPUs_params'],
+ {'weight': str(vcpu_weight), 'cap': '0'})
+
+ # Check that the VM is running according to Nova
+ self.assertEqual(self.vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to XenAPI.
+ self.assertEqual(self.vm['power_state'], 'Running')
+
+ if check_injection:
+ xenstore_data = self.vm['xenstore_data']
+ self.assertNotIn('vm-data/hostname', xenstore_data)
+ key = 'vm-data/networking/DEADBEEF0001'
+ xenstore_value = xenstore_data[key]
+ tcpip_data = ast.literal_eval(xenstore_value)
+ self.assertEqual(tcpip_data,
+ {'broadcast': '192.168.1.255',
+ 'dns': ['192.168.1.4', '192.168.1.3'],
+ 'gateway': '192.168.1.1',
+ 'gateway_v6': '2001:db8:0:1::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': '2001:db8:0:1:dcad:beff:feef:1',
+ 'netmask': 64,
+ 'gateway': '2001:db8:0:1::1'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.1.100',
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1'},
+ {'enabled': '1',
+ 'ip': '192.168.1.101',
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1'}],
+ 'label': 'test1',
+ 'mac': 'DE:AD:BE:EF:00:01'})
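+ # (the xenstore key checked above is the instance MAC address,
+ # DE:AD:BE:EF:00:01, with the colons stripped)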
+
+ def check_vm_params_for_windows(self):
+ self.assertEqual(self.vm['platform']['nx'], 'true')
+ self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], '')
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEqual(self.vm['PV_kernel'], '')
+ self.assertNotEqual(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def _list_vdis(self):
+ session = get_session()
+ return session.call_xenapi('VDI.get_all')
+
+ def _list_vms(self):
+ session = get_session()
+ return session.call_xenapi('VM.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+ if vdi_ref not in start_list:
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ # If the cache is turned on then the base disk will be
+ # there even after the cleanup
+ if 'other_config' in vdi_rec:
+ if 'image-id' not in vdi_rec['other_config']:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+ else:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
+ def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
+ instance_type_id="3", os_type="linux",
+ hostname="test", architecture="x86-64", instance_id=1,
+ injected_files=None, check_injection=False,
+ create_record=True, empty_dns=False,
+ block_device_info=None,
+ key_data=None):
+ if injected_files is None:
+ injected_files = []
+
+ # Fake out inject_instance_metadata
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ if create_record:
+ instance = objects.Instance(context=self.context)
+ instance.project_id = self.project_id
+ instance.user_id = self.user_id
+ instance.image_ref = image_ref
+ instance.kernel_id = kernel_id
+ instance.ramdisk_id = ramdisk_id
+ instance.root_gb = 20
+ instance.ephemeral_gb = 0
+ instance.instance_type_id = instance_type_id
+ instance.os_type = os_type
+ instance.hostname = hostname
+ instance.key_data = key_data
+ instance.architecture = architecture
+ instance.system_metadata = get_create_system_metadata(
+ self.context, instance_type_id)
+ instance.create()
+ else:
+ instance = objects.Instance.get_by_id(self.context, instance_id)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ if empty_dns:
+ # NOTE(tr3buchet): this is a terrible way to do this...
+ network_info[0]['network']['subnets'][0]['dns'] = []
+
+ image_meta = {}
+ if image_ref:
+ image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
+ self.conn.spawn(self.context, instance, image_meta, injected_files,
+ 'herp', network_info, block_device_info)
+ self.create_vm_record(self.conn, os_type, instance['name'])
+ self.check_vm_record(self.conn, instance_type_id, check_injection)
+ self.assertEqual(instance['os_type'], os_type)
+ self.assertEqual(instance['architecture'], architecture)
+
+ def test_spawn_ipxe_iso_success(self):
+ self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
+ vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
+
+ self.flags(ipxe_network_name='test1',
+ ipxe_boot_menu_url='http://boot.example.com',
+ ipxe_mkisofs_cmd='/root/mkisofs',
+ group='xenserver')
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+ self.conn._session.call_plugin_serialized(
+ 'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
+ 'http://boot.example.com', '192.168.1.100', '255.255.255.0',
+ '192.168.1.1', '192.168.1.3', '/root/mkisofs')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_no_network_name(self):
+ self.flags(ipxe_network_name=None,
+ ipxe_boot_menu_url='http://boot.example.com',
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_no_boot_menu_url(self):
+ self.flags(ipxe_network_name='test1',
+ ipxe_boot_menu_url=None,
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_ipxe_iso_unknown_network_name(self):
+ self.flags(ipxe_network_name='test2',
+ ipxe_boot_menu_url='http://boot.example.com',
+ group='xenserver')
+
+ # call_plugin_serialized shouldn't be called
+ self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+
+ self.mox.ReplayAll()
+ self._test_spawn(IMAGE_IPXE_ISO, None, None)
+
+ def test_spawn_empty_dns(self):
+ # Test spawning with an empty dns list.
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ empty_dns=True)
+ self.check_vm_params_for_linux()
+
+ def test_spawn_not_enough_memory(self):
+ self.assertRaises(exception.InsufficientFreeMemory,
+ self._test_spawn,
+ '1', 2, 3, "4") # m1.xlarge
+
+ def test_spawn_fail_cleanup_1(self):
+ """Simulates an error while downloading an image.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_fail_cleanup_2(self):
+ """Simulates an error while creating VM record.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ stubs.stubout_create_vm(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_fail_cleanup_3(self):
+ """Simulates an error while attaching disks.
+
+ Verifies that the VM and VDIs created are properly cleaned up.
+ """
+ stubs.stubout_attach_disks(self.stubs)
+ vdi_recs_start = self._list_vdis()
+ start_vms = self._list_vms()
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, '1', 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ end_vms = self._list_vms()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+ # No additional VMs should be found.
+ self.assertEqual(start_vms, end_vms)
+
+ def test_spawn_raw_glance(self):
+ self._test_spawn(IMAGE_RAW, None, None, os_type=None)
+ self.check_vm_params_for_windows()
+
+ def test_spawn_vhd_glance_linux(self):
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_windows(self):
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="windows", architecture="i386",
+ instance_type_id=5)
+ self.check_vm_params_for_windows()
+
+ def test_spawn_iso_glance(self):
+ self._test_spawn(IMAGE_ISO, None, None,
+ os_type="windows", architecture="i386")
+ self.check_vm_params_for_windows()
+
+ def test_spawn_glance(self):
+
+ def fake_fetch_disk_image(context, session, instance, name_label,
+ image_id, image_type):
+ sr_ref = vm_utils.safe_find_sr(session)
+ image_type_str = vm_utils.ImageType.to_string(image_type)
+ vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
+ name_label, image_type_str, "20")
+ vdi_role = vm_utils.ImageType.get_role(image_type)
+ vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
+ return {vdi_role: dict(uuid=vdi_uuid, file=None)}
+ self.stubs.Set(vm_utils, '_fetch_disk_image',
+ fake_fetch_disk_image)
+
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
+
+ def test_spawn_boot_from_volume_no_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(None, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_no_glance_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(IMAGE_FROM_VOLUME, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_boot_from_volume_with_image_meta(self):
+ dev_info = get_fake_device_info()
+ self._test_spawn(IMAGE_VHD, None, None,
+ block_device_info=dev_info)
+
+ def test_spawn_netinject_file(self):
+ self.flags(flat_injected=True)
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _tee_handler(cmd, **kwargs):
+ actual = kwargs.get('process_input', None)
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 192.168.1.100
+ netmask 255.255.255.0
+ broadcast 192.168.1.255
+ gateway 192.168.1.1
+ dns-nameservers 192.168.1.3 192.168.1.4
+iface eth0 inet6 static
+ address 2001:db8:0:1:dcad:beff:feef:1
+ netmask 64
+ gateway 2001:db8:0:1::1
+"""
+ self.assertEqual(expected, actual)
+ self._tee_executed = True
+ return '', ''
+
+ def _readlink_handler(cmd_parts, **kwargs):
+ return os.path.realpath(cmd_parts[2]), ''
+
+ fake_processutils.fake_execute_set_repliers([
+ # Capture the tee .../etc/network/interfaces command
+ (r'tee.*interfaces', _tee_handler),
+ (r'readlink -nm.*', _readlink_handler),
+ ])
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK,
+ check_injection=True)
+ self.assertTrue(self._tee_executed)
+
+ def test_spawn_netinject_xenstore(self):
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # When mounting, create real files under the mountpoint to simulate
+ # files in the mounted filesystem
+
+ # mount point will be the last item of the command list
+            self._tmpdir = cmd[-1]
+ LOG.debug('Creating files in %s to simulate guest agent',
+ self._tmpdir)
+ os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ # Touch the file using open
+ open(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'), 'w').close()
+ return '', ''
+
+ def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # Umount would normally make files in the mounted filesystem
+ # disappear, so do that here
+ LOG.debug('Removing simulated guest agent files in %s',
+ self._tmpdir)
+ os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr'))
+ return '', ''
+
+ def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
+ self._tee_executed = True
+ return '', ''
+
+ fake_processutils.fake_execute_set_repliers([
+ (r'mount', _mount_handler),
+ (r'umount', _umount_handler),
+ (r'tee.*interfaces', _tee_handler)])
+ self._test_spawn('1', 2, 3, check_injection=True)
+
+ # tee must not run in this case, where an injection-capable
+ # guest agent is detected
+ self.assertFalse(self._tee_executed)
+
+ def test_spawn_injects_auto_disk_config_to_xenstore(self):
+ instance = self._create_instance(spawn=False)
+ self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
+ self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.spawn(self.context, instance,
+ IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
+
+ def test_spawn_vlanmanager(self):
+ self.flags(network_manager='nova.network.manager.VlanManager',
+ vlan_interface='fake0')
+
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
+ # Reset network table
+ xenapi_fake.reset_table('network')
+ # Instance id = 2 will use vlan network (see db/fakes.py)
+ ctxt = self.context.elevated()
+ self.network.conductor_api = conductor_api.LocalAPI()
+ self._create_instance(2, False)
+ networks = self.network.db.network_get_all(ctxt)
+ with mock.patch('nova.objects.network.Network._from_db_object'):
+ for network in networks:
+ self.network.set_network_host(ctxt, network)
+
+ self.network.allocate_for_instance(ctxt,
+ instance_id=2,
+ instance_uuid='00000000-0000-0000-0000-000000000002',
+ host=CONF.host,
+ vpn=None,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ macs=None)
+ self._test_spawn(IMAGE_MACHINE,
+ IMAGE_KERNEL,
+ IMAGE_RAMDISK,
+ instance_id=2,
+ create_record=False)
+ # TODO(salvatore-orlando): a complete test here would require
+ # a check for making sure the bridge for the VM's VIF is
+ # consistent with bridge specified in nova db
+
+ def test_spawn_with_network_qos(self):
+ self._create_instance()
+ for vif_ref in xenapi_fake.get_all('VIF'):
+ vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+ self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
+ self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
+ str(3 * 10 * 1024))
+
+ def test_spawn_ssh_key_injection(self):
+ # Test spawning with key_data on an instance. Should use
+ # agent file injection.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ def fake_encrypt_text(sshkey, new_pass):
+ self.assertEqual("ssh-rsa fake_keydata", sshkey)
+ return "fake"
+
+ self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
+
+ expected_data = ('\n# The following ssh key was injected by '
+ 'Nova\nssh-rsa fake_keydata\n')
+
+ injected_files = [('/root/.ssh/authorized_keys', expected_data)]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ key_data='ssh-rsa fake_keydata')
+ self.assertEqual(actual_injected_files, injected_files)
+
+ def test_spawn_ssh_key_injection_non_rsa(self):
+ # Test spawning with key_data on an instance. Should use
+ # agent file injection.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ def fake_encrypt_text(sshkey, new_pass):
+ raise NotImplementedError("Should not be called")
+
+ self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
+
+ expected_data = ('\n# The following ssh key was injected by '
+ 'Nova\nssh-dsa fake_keydata\n')
+
+ injected_files = [('/root/.ssh/authorized_keys', expected_data)]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ key_data='ssh-dsa fake_keydata')
+ self.assertEqual(actual_injected_files, injected_files)
+
+ def test_spawn_injected_files(self):
+ # Test spawning with injected_files.
+ self.flags(use_agent_default=True,
+ group='xenserver')
+ actual_injected_files = []
+
+ def fake_inject_file(self, method, args):
+ path = base64.b64decode(args['b64_path'])
+ contents = base64.b64decode(args['b64_contents'])
+ actual_injected_files.append((path, contents))
+ return jsonutils.dumps({'returncode': '0', 'message': 'success'})
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_inject_file', fake_inject_file)
+
+ injected_files = [('/tmp/foo', 'foobar')]
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64",
+ injected_files=injected_files)
+ self.check_vm_params_for_linux()
+ self.assertEqual(actual_injected_files, injected_files)
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade(self, mock_get):
+ self.flags(use_agent_default=True,
+ group='xenserver')
+
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_spawn_agent_upgrade_fails_silently(self, mock_get):
+ mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
+ "hypervisor": "xen", "os": "windows",
+ "url": "url", "md5hash": "asdf",
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False,
+ 'id': 1}
+
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ method="_plugin_agent_agentupdate", failure="fake_error")
+
+ def test_spawn_with_resetnetwork_alternative_returncode(self):
+ self.flags(use_agent_default=True,
+ group='xenserver')
+
+ def fake_resetnetwork(self, method, args):
+ fake_resetnetwork.called = True
+ # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
+ return jsonutils.dumps({'returncode': '500',
+ 'message': 'success'})
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ '_plugin_agent_resetnetwork', fake_resetnetwork)
+ fake_resetnetwork.called = False
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ self.assertTrue(fake_resetnetwork.called)
+
+ def _test_spawn_fails_silently_with(self, expected_exception_cls,
+ method="_plugin_agent_version",
+ failure=None, value=None):
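+        # Stub the given agent plugin call to fail (or return ``value``),
+        # spawn an instance and check that the fault recorded for it is of
+        # the expected exception class.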
+ self.flags(use_agent_default=True,
+ agent_version_timeout=0,
+ group='xenserver')
+
+ def fake_agent_call(self, method, args):
+ if failure:
+ raise xenapi_fake.Failure([failure])
+ else:
+ return value
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ method, fake_agent_call)
+
+ called = {}
+
+ def fake_add_instance_fault(*args, **kwargs):
+ called["fake_add_instance_fault"] = args[2]
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ fake_add_instance_fault)
+
+ self._test_spawn(IMAGE_VHD, None, None,
+ os_type="linux", architecture="x86-64")
+ actual_exception = called["fake_add_instance_fault"]
+ self.assertIsInstance(actual_exception, expected_exception_cls)
+
+ def test_spawn_fails_silently_with_agent_timeout(self):
+ self._test_spawn_fails_silently_with(exception.AgentTimeout,
+ failure="TIMEOUT:fake")
+
+ def test_spawn_fails_silently_with_agent_not_implemented(self):
+ self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
+ failure="NOT IMPLEMENTED:fake")
+
+ def test_spawn_fails_silently_with_agent_error(self):
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ failure="fake_error")
+
+ def test_spawn_fails_silently_with_agent_bad_return(self):
+ error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
+ self._test_spawn_fails_silently_with(exception.AgentError,
+ value=error)
+
+ def test_rescue(self):
+ instance = self._create_instance(spawn=False)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+
+ session = get_session()
+ vm_ref = vm_utils.lookup(session, instance['name'])
+
+ swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
+ root_vdi_ref = xenapi_fake.create_vdi('root', None)
+ eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
+ eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
+ vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
+
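+        # Attach disks at fixed userdevice numbers; the rescue VM should
+        # remap the original root disk to device 1, keep the swap and
+        # ephemeral disks, and skip the attached volume.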
+ xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
+ xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
+ xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
+ xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
+ xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
+ other_config={'osvol': True})
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+ conn.rescue(self.context, instance, [], image_meta, '')
+
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ rescue_name = "%s-rescue" % vm["name_label"]
+ rescue_ref = vm_utils.lookup(session, rescue_name)
+ rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
+
+ vdi_refs = {}
+ for vbd_ref in rescue_vm['VBDs']:
+ vbd = xenapi_fake.get_record('VBD', vbd_ref)
+ vdi_refs[vbd['VDI']] = vbd['userdevice']
+
+ self.assertEqual('1', vdi_refs[root_vdi_ref])
+ self.assertEqual('2', vdi_refs[swap_vdi_ref])
+ self.assertEqual('4', vdi_refs[eph1_vdi_ref])
+ self.assertEqual('5', vdi_refs[eph2_vdi_ref])
+ self.assertNotIn(vol_vdi_ref, vdi_refs)
+
+ def test_rescue_preserve_disk_on_failure(self):
+ # test that the original disk is preserved if rescue setup fails
+ # bug #1227898
+ instance = self._create_instance()
+ session = get_session()
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
+
+        # raise an error while spawning the rescue VM and trigger the
+        # undo manager logic:
+ def fake_start(*args, **kwargs):
+ raise test.TestingException('Start Error')
+
+ self.stubs.Set(self.conn._vmops, '_start', fake_start)
+
+ self.assertRaises(test.TestingException, self.conn.rescue,
+ self.context, instance, [], image_meta, '')
+
+ # confirm original disk still exists:
+ vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
+ self.assertEqual(vdi_ref, vdi_ref2)
+ self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
+
+ def test_unrescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Unrescue expects the original instance to be powered off
+ conn.power_off(instance)
+ xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
+ conn.unrescue(instance, None)
+
+ def test_unrescue_not_in_rescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ # Ensure that it will not unrescue a non-rescued instance.
+ self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
+ instance, None)
+
+ def test_finish_revert_migration(self):
+ instance = self._create_instance()
+
+ class VMOpsMock():
+
+ def __init__(self):
+ self.finish_revert_migration_called = False
+
+ def finish_revert_migration(self, context, instance, block_info,
+ power_on):
+ self.finish_revert_migration_called = True
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn._vmops = VMOpsMock()
+ conn.finish_revert_migration(self.context, instance, None)
+ self.assertTrue(conn._vmops.finish_revert_migration_called)
+
+ def test_reboot_hard(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "HARD")
+
+ def test_poll_rebooting_instances(self):
+ self.mox.StubOutWithMock(compute_api.API, 'reboot')
+ compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ instance = self._create_instance()
+ instances = [instance]
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.poll_rebooting_instances(60, instances)
+
+ def test_reboot_soft(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_reboot_halted(self):
+ session = get_session()
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Halted')
+ conn.reboot(self.context, instance, None, "SOFT")
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ self.assertEqual(vm['power_state'], 'Running')
+
+ def test_reboot_unknown_state(self):
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Unknown')
+ self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
+ instance, None, "SOFT")
+
+ def test_reboot_rescued(self):
+ instance = self._create_instance()
+ instance['vm_state'] = vm_states.RESCUED
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ real_result = vm_utils.lookup(conn._session, instance['name'])
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ vm_utils.lookup(conn._session, instance['name'],
+ True).AndReturn(real_result)
+ self.mox.ReplayAll()
+
+ conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_get_console_output_succeeds(self):
+
+ def fake_get_console_output(instance):
+ self.assertEqual("instance", instance)
+ return "console_log"
+ self.stubs.Set(self.conn._vmops, 'get_console_output',
+ fake_get_console_output)
+
+ self.assertEqual(self.conn.get_console_output('context', "instance"),
+ "console_log")
+
+ def _test_maintenance_mode(self, find_host, find_aggregate):
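+        # Put the host into maintenance mode and verify the running
+        # instance is live migrated (VM.pool_migrate) and left in the
+        # MIGRATING task state.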
+ real_call_xenapi = self.conn._session.call_xenapi
+ instance = self._create_instance(spawn=True)
+ api_calls = {}
+
+ # Record all the xenapi calls, and return a fake list of hosts
+ # for the host.get_all call
+ def fake_call_xenapi(method, *args):
+ api_calls[method] = args
+ if method == 'host.get_all':
+ return ['foo', 'bar', 'baz']
+ return real_call_xenapi(method, *args)
+ self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
+
+ def fake_aggregate_get(context, host, key):
+ if find_aggregate:
+ return [test_aggregate.fake_aggregate]
+ else:
+ return []
+ self.stubs.Set(db, 'aggregate_get_by_host',
+ fake_aggregate_get)
+
+ def fake_host_find(context, session, src, dst):
+ if find_host:
+ return 'bar'
+ else:
+ raise exception.NoValidHost("I saw this one coming...")
+ self.stubs.Set(host, '_host_find', fake_host_find)
+
+ result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
+ self.assertEqual(result, 'on_maintenance')
+
+ # We expect the VM.pool_migrate call to have been called to
+ # migrate our instance to the 'bar' host
+ vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
+ host_ref = "foo"
+ expected = (vm_ref, host_ref, {"live": "true"})
+ self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
+ def test_uuid_find(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ fake_inst = fake_instance.fake_db_instance(id=123)
+ fake_inst2 = fake_instance.fake_db_instance(id=456)
+ db.instance_get_all_by_host(self.context, fake_inst['host'],
+ columns_to_join=None,
+ use_slave=False
+ ).AndReturn([fake_inst, fake_inst2])
+ self.mox.ReplayAll()
+ expected_name = CONF.instance_name_template % fake_inst['id']
+ inst_uuid = host._uuid_find(self.context, fake_inst['host'],
+ expected_name)
+ self.assertEqual(inst_uuid, fake_inst['uuid'])
+
+ def test_session_virtapi(self):
+ was = {'called': False}
+
+ def fake_aggregate_get_by_host(self, *args, **kwargs):
+ was['called'] = True
+ raise test.TestingException()
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+
+ self.stubs.Set(self.conn._session, "is_slave", True)
+
+ self.assertRaises(test.TestingException,
+ self.conn._session._get_host_uuid)
+ self.assertTrue(was['called'])
+
+ def test_per_instance_usage_running(self):
+ instance = self._create_instance(spawn=True)
+ flavor = flavors.get_flavor(3)
+
+ expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
+ 'uuid': instance['uuid']}}
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ # Paused instances still consume resources:
+ self.conn.pause(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
+ """Creates and spawns a test instance."""
+ instance_values = {
+ 'id': instance_id,
+ 'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
+ 'display_name': 'host-%d' % instance_id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'vm_mode': 'hvm',
+ 'architecture': 'x86-64'}
+ instance_values.update(attrs)
+
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'}
+ if spawn:
+ self.conn.spawn(self.context, instance, image_meta, [], 'herp',
+ network_info)
+ if obj:
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+ return instance
+
+ def test_destroy_clean_up_kernel_and_ramdisk(self):
+ def fake_lookup_kernel_ramdisk(session, vm_ref):
+ return "kernel", "ramdisk"
+
+ self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
+ fake_lookup_kernel_ramdisk)
+
+ def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
+ fake_destroy_kernel_ramdisk.called = True
+ self.assertEqual("kernel", kernel)
+ self.assertEqual("ramdisk", ramdisk)
+
+ fake_destroy_kernel_ramdisk.called = False
+
+ self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
+ fake_destroy_kernel_ramdisk)
+
+ instance = self._create_instance(spawn=True)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ self.conn.destroy(self.context, instance, network_info)
+
+ vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
+ self.assertIsNone(vm_ref)
+ self.assertTrue(fake_destroy_kernel_ramdisk.called)
+
+
+class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
+ """Unit tests for Diffie-Hellman code."""
+ def setUp(self):
+ super(XenAPIDiffieHellmanTestCase, self).setUp()
+ self.alice = agent.SimpleDH()
+ self.bob = agent.SimpleDH()
+
+ def test_shared(self):
+ alice_pub = self.alice.get_public()
+ bob_pub = self.bob.get_public()
+ alice_shared = self.alice.compute_shared(bob_pub)
+ bob_shared = self.bob.compute_shared(alice_pub)
+ self.assertEqual(alice_shared, bob_shared)
+
+ def _test_encryption(self, message):
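+        # Alice encrypts the message, the ciphertext must not gain a
+        # trailing newline, and Bob must decrypt it back to the original.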
+ enc = self.alice.encrypt(message)
+ self.assertFalse(enc.endswith('\n'))
+ dec = self.bob.decrypt(enc)
+ self.assertEqual(dec, message)
+
+ def test_encrypt_simple_message(self):
+ self._test_encryption('This is a simple message.')
+
+ def test_encrypt_message_with_newlines_at_end(self):
+ self._test_encryption('This message has a newline at the end.\n')
+
+ def test_encrypt_many_newlines_at_end(self):
+ self._test_encryption('Message with lotsa newlines.\n\n\n')
+
+ def test_encrypt_newlines_inside_message(self):
+ self._test_encryption('Message\nwith\ninterior\nnewlines.')
+
+ def test_encrypt_with_leading_newlines(self):
+ self._test_encryption('\n\nMessage with leading newlines.')
+
+ def test_encrypt_really_long_message(self):
+ self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIMigrateInstance(stubs.XenAPITestBase):
+ """Unit test for verifying migration-related actions."""
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(XenAPIMigrateInstance, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', 'fake_br1')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ migration_values = {
+ 'source_compute': 'nova-compute',
+ 'dest_compute': 'nova-compute',
+ 'dest_host': '10.127.5.114',
+ 'status': 'post-migrating',
+ 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 1
+ }
+ self.migration = db.migration_create(
+ context.get_admin_context(), migration_values)
+
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ stubs.stub_out_migration_methods(self.stubs)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_passes_exceptions(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = {"root_gb": 80, 'ephemeral_gb': 0}
+
+ def fake_raise(*args, **kwargs):
+ raise exception.MigrationError(reason='test failure')
+ self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
+ instance = db.instance_create(self.context, self.instance_values)
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.ResizeError,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ 'fake_dest', flavor, None)
+
+ def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
+ flavor = {"root_gb": 0, 'ephemeral_gb': 0}
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = db.instance_create(self.context, values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ conn.migrate_disk_and_power_off(self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def _test_revert_migrate(self, power_on):
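+        # Finish an incoming migration (optionally powering the VM on),
+        # then revert it and check that the VDI resize and the revert
+        # hook were both invoked.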
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ self.called = False
+ self.fake_vm_start_called = False
+ self.fake_finish_revert_migration_called = False
+ context = 'fake_context'
+
+ def fake_vm_start(*args, **kwargs):
+ self.fake_vm_start_called = True
+
+ def fake_vdi_resize(*args, **kwargs):
+ self.called = True
+
+ def fake_finish_revert_migration(*args, **kwargs):
+ self.fake_finish_revert_migration_called = True
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+ self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
+ fake_finish_revert_migration)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
+ product_version=(4, 0, 0),
+ product_brand='XenServer')
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ base = xenapi_fake.create_vdi('hurr', 'fake')
+ base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
+ cow = xenapi_fake.create_vdi('durr', 'fake')
+ cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy=base_uuid, cow=cow_uuid),
+ network_info, image_meta, resize_instance=True,
+ block_device_info=None, power_on=power_on)
+ self.assertEqual(self.called, True)
+ self.assertEqual(self.fake_vm_start_called, power_on)
+
+ conn.finish_revert_migration(context, instance, network_info)
+ self.assertEqual(self.fake_finish_revert_migration_called, True)
+
+ def test_revert_migrate_power_on(self):
+ self._test_revert_migrate(True)
+
+ def test_revert_migrate_power_off(self):
+ self._test_revert_migrate(False)
+
+ def _test_finish_migrate(self, power_on):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ self.called = False
+ self.fake_vm_start_called = False
+
+ def fake_vm_start(*args, **kwargs):
+ self.fake_vm_start_called = True
+
+ def fake_vdi_resize(*args, **kwargs):
+ self.called = True
+
+ self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
+ product_version=(4, 0, 0),
+ product_brand='XenServer')
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=True,
+ block_device_info=None, power_on=power_on)
+ self.assertEqual(self.called, True)
+ self.assertEqual(self.fake_vm_start_called, power_on)
+
+ def test_finish_migrate_power_on(self):
+ self._test_finish_migrate(True)
+
+ def test_finish_migrate_power_off(self):
+ self._test_finish_migrate(False)
+
+ def test_finish_migrate_no_local_storage(self):
+ values = copy.copy(self.instance_values)
+ values["root_gb"] = 0
+ values["ephemeral_gb"] = 0
+ instance = create_instance_with_system_metadata(self.context, values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=True)
+
+ def test_finish_migrate_no_resize_vdi(self):
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForVMTests,
+ "VDI_resize_online", fake_vdi_resize)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ # Resize instance would be determined by the compute call
+ image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ conn.finish_migration(self.context, self.migration, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, image_meta, resize_instance=False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_too_many_partitions_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_migrate_bad_fs_type_no_resize_down(self):
+ instance_values = self.instance_values
+ instance = db.instance_create(self.context, instance_values)
+ xenapi_fake.create_vm(instance['name'], 'Running')
+ flavor = db.flavor_get_by_name(self.context, 'm1.small')
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_partitions(partition):
+ return [(1, 2, 3, "ext2", "", "boot")]
+
+ self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
+
+ self.assertRaises(exception.InstanceFaultRollback,
+ conn.migrate_disk_and_power_off,
+ self.context, instance,
+ '127.0.0.1', flavor, None)
+
+ def test_migrate_rollback_when_resize_down_fs_fails(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+
+ self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
+ self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
+ self.mox.StubOutWithMock(vm_utils, 'resize_disk')
+ self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
+ self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
+ self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
+ self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
+
+ instance = objects.Instance(context=self.context,
+ auto_disk_config=True, uuid='uuid')
+ instance.obj_reset_changes()
+ vm_ref = "vm_ref"
+ dest = "dest"
+ flavor = "type"
+ sr_path = "sr_path"
+
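+        # Expected sequence: ensure the VM is shut down, apply the orig
+        # name label, resize the root disk, fail while migrating the VHD,
+        # then destroy the new VDI and restore the original VM.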
+ vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
+ vmops._apply_orig_vm_name_label(instance, vm_ref)
+ old_vdi_ref = "old_ref"
+ vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
+ (old_vdi_ref, None))
+ new_vdi_ref = "new_ref"
+ new_vdi_uuid = "new_uuid"
+ vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
+ flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
+ vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
+ sr_path, 0).AndRaise(
+ exception.ResizeError(reason="asdf"))
+
+ vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
+ vmops._restore_orig_vm_and_cleanup_orphan(instance)
+
+ self.mox.ReplayAll()
+
+ with mock.patch.object(instance, 'save') as mock_save:
+ self.assertRaises(exception.InstanceFaultRollback,
+ vmops._migrate_disk_resizing_down, self.context,
+ instance, dest, flavor, vm_ref, sr_path)
+ self.assertEqual(3, mock_save.call_count)
+ self.assertEqual(60.0, instance.progress)
+
+ def test_resize_ensure_vm_is_shutdown_cleanly(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_forced(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+ vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_fails(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
+ vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+ vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
+ "ref").AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.ResizeError,
+ vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
+
+ def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vmops = conn._vmops
+ fake_instance = {'uuid': 'uuid'}
+
+ self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
+ self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
+
+ vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+
+class XenAPIImageTypeTestCase(test.NoDBTestCase):
+ """Test ImageType class."""
+
+ def test_to_string(self):
+ # Can convert from type id to type string.
+ self.assertEqual(
+ vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
+ vm_utils.ImageType.KERNEL_STR)
+
+ def _assert_role(self, expected_role, image_type_id):
+ self.assertEqual(
+ expected_role,
+ vm_utils.ImageType.get_role(image_type_id))
+
+ def test_get_image_role_kernel(self):
+ self._assert_role('kernel', vm_utils.ImageType.KERNEL)
+
+ def test_get_image_role_ramdisk(self):
+ self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
+
+ def test_get_image_role_disk(self):
+ self._assert_role('root', vm_utils.ImageType.DISK)
+
+ def test_get_image_role_disk_raw(self):
+ self._assert_role('root', vm_utils.ImageType.DISK_RAW)
+
+ def test_get_image_role_disk_vhd(self):
+ self._assert_role('root', vm_utils.ImageType.DISK_VHD)
+
+
+class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
+ """Unit tests for code that detects the ImageType."""
+ def assert_disk_type(self, image_meta, expected_disk_type):
+ actual = vm_utils.determine_disk_image_type(image_meta)
+ self.assertEqual(expected_disk_type, actual)
+
+ def test_machine(self):
+ image_meta = {'id': 'a', 'disk_format': 'ami'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
+
+ def test_raw(self):
+ image_meta = {'id': 'a', 'disk_format': 'raw'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
+
+ def test_vhd(self):
+ image_meta = {'id': 'a', 'disk_format': 'vhd'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
+
+ def test_none(self):
+ image_meta = None
+ self.assert_disk_type(image_meta, None)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIHostTestCase(stubs.XenAPITestBase):
+ """Tests HostState, which holds metrics from XenServer that get
+ reported back to the Schedulers.
+ """
+
+ def setUp(self):
+ super(XenAPIHostTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.context = context.get_admin_context()
+ self.flags(use_local=True, group='conductor')
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.instance = fake_instance.fake_db_instance(name='foo')
+
+ def test_host_state(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ # Values from fake.create_local_srs (ext SR)
+ self.assertEqual(stats['disk_total'], 40000)
+ self.assertEqual(stats['disk_used'], 20000)
+ # Values from fake._plugin_xenhost_host_data
+ self.assertEqual(stats['host_memory_total'], 10)
+ self.assertEqual(stats['host_memory_overhead'], 20)
+ self.assertEqual(stats['host_memory_free'], 30)
+ self.assertEqual(stats['host_memory_free_computed'], 40)
+ self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
+ self.assertThat({'cpu_count': 50},
+ matchers.DictMatches(stats['host_cpu_info']))
+ # No VMs running
+ self.assertEqual(stats['vcpus_used'], 0)
+
+ def test_host_state_vcpus_used(self):
+ stats = self.conn.host_state.get_host_stats(True)
+ self.assertEqual(stats['vcpus_used'], 0)
+ xenapi_fake.create_vm(self.instance['name'], 'Running')
+ stats = self.conn.host_state.get_host_stats(True)
+ self.assertEqual(stats['vcpus_used'], 4)
+
+ def test_pci_passthrough_devices_whitelist(self):
+ # NOTE(guillaume-thouvenin): This pci whitelist will be used to
+ # match with _plugin_xenhost_get_pci_device_details method in fake.py.
+ white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
+ self.flags(pci_passthrough_whitelist=[white_list])
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual(len(stats['pci_passthrough_devices']), 1)
+
+ def test_pci_passthrough_devices_no_whitelist(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual(len(stats['pci_passthrough_devices']), 0)
+
+ def test_host_state_missing_sr(self):
+ # Must trigger construction of 'host_state' property
+ # before introducing the stub which raises the error
+ hs = self.conn.host_state
+
+ def fake_safe_find_sr(session):
+ raise exception.StorageRepositoryNotFound('not there')
+
+ self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
+ self.assertRaises(exception.StorageRepositoryNotFound,
+ hs.get_host_stats,
+ refresh=True)
+
+ def _test_host_action(self, method, action, expected=None):
+ result = method('host', action)
+ if not expected:
+ expected = action
+ self.assertEqual(result, expected)
+
+ def test_host_reboot(self):
+ self._test_host_action(self.conn.host_power_action, 'reboot')
+
+ def test_host_shutdown(self):
+ self._test_host_action(self.conn.host_power_action, 'shutdown')
+
+ def test_host_startup(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_power_action, 'host', 'startup')
+
+ def test_host_maintenance_on(self):
+ self._test_host_action(self.conn.host_maintenance_mode,
+ True, 'on_maintenance')
+
+ def test_host_maintenance_off(self):
+ self._test_host_action(self.conn.host_maintenance_mode,
+ False, 'off_maintenance')
+
+ def test_set_enable_host_enable(self):
+ _create_service_entries(self.context, values={'nova': ['fake-mini']})
+ self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
+ service = db.service_get_by_args(self.context, 'fake-mini',
+ 'nova-compute')
+ self.assertEqual(service.disabled, False)
+
+ def test_set_enable_host_disable(self):
+ _create_service_entries(self.context, values={'nova': ['fake-mini']})
+ self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
+ service = db.service_get_by_args(self.context, 'fake-mini',
+ 'nova-compute')
+ self.assertEqual(service.disabled, True)
+
+ def test_get_host_uptime(self):
+ result = self.conn.get_host_uptime('host')
+ self.assertEqual(result, 'fake uptime')
+
+ def test_supported_instances_is_included_in_host_state(self):
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertIn('supported_instances', stats)
+
+ def test_supported_instances_is_calculated_by_to_supported_instances(self):
+
+ def to_supported_instances(somedata):
+ self.assertIsNone(somedata)
+ return "SOMERETURNVALUE"
+ self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
+
+ stats = self.conn.host_state.get_host_stats(False)
+ self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
+
+ def test_update_stats_caches_hostname(self):
+ self.mox.StubOutWithMock(host, 'call_xenhost')
+ self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
+ self.mox.StubOutWithMock(vm_utils, 'list_vms')
+ self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
+ data = {'disk_total': 0,
+ 'disk_used': 0,
+ 'disk_available': 0,
+ 'supported_instances': 0,
+ 'host_capabilities': [],
+ 'host_hostname': 'foo',
+ 'vcpus_used': 0,
+ }
+ sr_rec = {
+ 'physical_size': 0,
+ 'physical_utilisation': 0,
+ 'virtual_allocation': 0,
+ }
+
+        for i in range(3):
+            if i == 2:
+                # For the third recorded call (the second get_host_stats
+                # call below) report a different hostname; the cached
+                # value from the first call should still be returned.
+                data = dict(data, host_hostname='bar')
+            host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
+            vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
+            vm_utils.list_vms(self.conn._session).AndReturn([])
+            self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
+                sr_rec)
+
+ self.mox.ReplayAll()
+ stats = self.conn.host_state.get_host_stats(refresh=True)
+ self.assertEqual('foo', stats['hypervisor_hostname'])
+ stats = self.conn.host_state.get_host_stats(refresh=True)
+ self.assertEqual('foo', stats['hypervisor_hostname'])
+
+
+class ToSupportedInstancesTestCase(test.NoDBTestCase):
+ def test_default_return_value(self):
+ self.assertEqual([],
+ host.to_supported_instances(None))
+
+ def test_return_value(self):
+ self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+ host.to_supported_instances([u'xen-3.0-x86_64']))
+
+ def test_invalid_values_do_not_break(self):
+ self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
+ host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
+
+ def test_multiple_values(self):
+ self.assertEqual(
+ [
+ (arch.X86_64, hvtype.XEN, 'xen'),
+ (arch.I686, hvtype.XEN, 'hvm')
+ ],
+ host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
+ )
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
+ def setUp(self):
+ super(XenAPIAutoDiskConfigTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=True,
+ osvol=False):
+ pass
+
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+ def assertIsPartitionCalled(self, called):
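+        # Stub out the partition/fs resize helper to record whether it
+        # runs, attach disks for a freshly created VM and assert the
+        # partitioning call happened (or not) as expected.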
+ marker = {"partition_called": False}
+
+ def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
+ flags):
+ marker["partition_called"] = True
+ self.stubs.Set(vm_utils, "_resize_part_and_fs",
+ fake_resize_part_and_fs)
+
+ context.RequestContext(self.user_id, self.project_id)
+ session = get_session()
+
+ disk_image_type = vm_utils.ImageType.DISK_VHD
+ instance = create_instance_with_system_metadata(self.context,
+ self.instance_values)
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+ vdis, disk_image_type, "fake_nw_inf")
+
+ self.assertEqual(marker["partition_called"], called)
+
+ def test_instance_not_auto_disk_config(self):
+ """Should not partition unless instance is marked as
+ auto_disk_config.
+ """
+ self.instance_values['auto_disk_config'] = False
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_two_partitions(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4', "", ""),
+                    (2, 100, 200, 'ext4', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(2, 100, 200, 'ext4', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
+ # Should not partition unless fail safes pass.
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(1, 100, 200, 'asdf', "", "")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(False)
+
+ @stub_vm_utils_with_vdi_attached_here
+ def test_instance_auto_disk_config_passes_fail_safes(self):
+ """Should partition if instance is marked as auto_disk_config=True and
+ virt-layer specific fail-safe checks pass.
+ """
+ self.instance_values['auto_disk_config'] = True
+
+ def fake_get_partitions(dev):
+ return [(1, 0, 100, 'ext4', "", "boot")]
+ self.stubs.Set(vm_utils, "_get_partitions",
+ fake_get_partitions)
+
+ self.assertIsPartitionCalled(True)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIGenerateLocal(stubs.XenAPITestBase):
+ """Test generating of local disks, like swap and ephemeral."""
+ def setUp(self):
+ super(XenAPIGenerateLocal, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+
+ self.instance_values = {'id': 1,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
+ 'image_ref': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'root_gb': 80,
+ 'ephemeral_gb': 0,
+ 'instance_type_id': '3', # m1.large
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
+
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
+ vbd_type='disk', read_only=False, bootable=True,
+ osvol=False, empty=False, unpluggable=True):
+ return session.call_xenapi('VBD.create', {'VM': vm_ref,
+ 'VDI': vdi_ref})
+
+ self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
+
+ def assertCalled(self, instance,
+ disk_image_type=vm_utils.ImageType.DISK_VHD):
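+        # Attach disks for a freshly created VM and assert that the
+        # stubbed generator (swap, ephemeral or blank ISO root) was called.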
+ context.RequestContext(self.user_id, self.project_id)
+ session = get_session()
+
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+
+ vdi_key = 'root'
+ if disk_image_type == vm_utils.ImageType.DISK_ISO:
+ vdi_key = 'iso'
+ vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.called = False
+ self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
+ vdis, disk_image_type, "fake_nw_inf")
+ self.assertTrue(self.called)
+
+ def test_generate_swap(self):
+ # Test swap disk generation.
+ instance_values = dict(self.instance_values, instance_type_id=5)
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_swap(*args, **kwargs):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
+
+ self.assertCalled(instance)
+
+ def test_generate_ephemeral(self):
+ # Test ephemeral disk generation.
+ instance_values = dict(self.instance_values, instance_type_id=4)
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_ephemeral(*args):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+ self.assertCalled(instance)
+
+ def test_generate_iso_blank_root_disk(self):
+ instance_values = dict(self.instance_values, instance_type_id=4)
+ instance_values.pop('kernel_id')
+ instance_values.pop('ramdisk_id')
+ instance = create_instance_with_system_metadata(self.context,
+ instance_values)
+
+ def fake_generate_ephemeral(*args):
+ pass
+ self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+ def fake_generate_iso(*args):
+ self.called = True
+ self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
+ fake_generate_iso)
+
+ self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
+
+
+class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
+ FAKE_VMS = {'test1:ref': dict(name_label='test1',
+ other_config=dict(nova_uuid='hash'),
+ domid='12',
+ _vifmap={'0': "a:b:c:d...",
+ '1': "e:f:12:q..."}),
+ 'test2:ref': dict(name_label='test2',
+ other_config=dict(nova_uuid='hash'),
+ domid='42',
+ _vifmap={'0': "a:3:c:d...",
+ '1': "e:f:42:q..."}),
+ }
+
+ def setUp(self):
+ super(XenAPIBWCountersTestCase, self).setUp()
+ self.stubs.Set(vm_utils, 'list_vms',
+ XenAPIBWCountersTestCase._fake_list_vms)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def _fake_get_vif_device_map(vm_rec):
+ return vm_rec['_vifmap']
+
+ self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
+ _fake_get_vif_device_map)
+
+ @classmethod
+ def _fake_list_vms(cls, session):
+ return cls.FAKE_VMS.iteritems()
+
+ @staticmethod
+ def _fake_fetch_bandwidth_mt(session):
+ return {}
+
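+ # Canned bandwidth samples keyed by domid (matching FAKE_VMS) and then
+ # by VIF device id.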
+ @staticmethod
+ def _fake_fetch_bandwidth(session):
+ return {'42':
+ {'0': {'bw_in': 21024, 'bw_out': 22048},
+ '1': {'bw_in': 231337, 'bw_out': 221212121}},
+ '12':
+ {'0': {'bw_in': 1024, 'bw_out': 2048},
+ '1': {'bw_in': 31337, 'bw_out': 21212121}},
+ }
+
+ def test_get_all_bw_counters(self):
+ instances = [dict(name='test1', uuid='1-2-3'),
+ dict(name='test2', uuid='4-5-6')]
+
+ self.stubs.Set(vm_utils, 'fetch_bandwidth',
+ self._fake_fetch_bandwidth)
+ result = self.conn.get_all_bw_counters(instances)
+ self.assertEqual(len(result), 4)
+ self.assertIn(dict(uuid='1-2-3',
+ mac_address="a:b:c:d...",
+ bw_in=1024,
+ bw_out=2048), result)
+ self.assertIn(dict(uuid='1-2-3',
+ mac_address="e:f:12:q...",
+ bw_in=31337,
+ bw_out=21212121), result)
+
+ self.assertIn(dict(uuid='4-5-6',
+ mac_address="a:3:c:d...",
+ bw_in=21024,
+ bw_out=22048), result)
+ self.assertIn(dict(uuid='4-5-6',
+ mac_address="e:f:42:q...",
+ bw_in=231337,
+ bw_out=221212121), result)
+
+ def test_get_all_bw_counters_in_failure_case(self):
+ """Test that get_all_bw_conters returns an empty list when
+ no data returned from Xenserver. c.f. bug #910045.
+ """
+ instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
+
+ self.stubs.Set(vm_utils, 'fetch_bandwidth',
+ self._fake_fetch_bandwidth_mt)
+ result = self.conn.get_all_bw_counters(instances)
+ self.assertEqual(result, [])
+
+
+# TODO(salvatore-orlando): this class and
+# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
+# share a lot of code. Consider abstracting common code in a base
+# class for firewall driver testing.
+#
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
+
+ REQUIRES_LOCKING = True
+
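+ # Canned iptables-save output used as the pre-existing rule set; the
+ # fake firewall session stubbed in setUp is expected to record the
+ # rules the driver writes into self._out_rules.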
+ _in_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*mangle',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*filter',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ ]
+
+ _in6_filter_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011',
+ ]
+
+ def setUp(self):
+ super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.user_id = 'mappin'
+ self.project_id = 'fake'
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
+ test_case=self)
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.network = importutils.import_object(CONF.network_manager)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.fw = self.conn._vmops.firewall_driver
+
+ def _create_instance_ref(self):
+ return db.instance_create(self.context,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'instance_type_id': 1})
+
+ def _create_test_security_group(self):
+ admin_ctxt = context.get_admin_context()
+ secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testgroup',
+ 'description': 'test group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'icmp',
+ 'from_port': 8,
+ 'to_port': -1,
+ 'cidr': '192.168.11.0/24'})
+
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'cidr': '192.168.10.0/24'})
+ return secgroup
+
+ def _validate_security_group(self):
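+ # Verify the original non-nova rules survived and that the instance
+ # chain, the security group chain and the expected ICMP/TCP accept
+ # rules all show up in self._out_rules.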
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self._in_rules)
+ for rule in in_rules:
+ if 'nova' not in rule:
+ self.assertTrue(rule in self._out_rules,
+ 'Rule went missing: %s' % rule)
+
+ instance_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ # last two octets change
+ if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
+ instance_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(instance_chain, "The instance chain wasn't added")
+ security_group_chain = None
+ for rule in self._out_rules:
+ # This is pretty crude, but it'll do for now
+ if '-A %s -j' % instance_chain in rule:
+ security_group_chain = rule.split(' ')[-1]
+ break
+ self.assertTrue(security_group_chain,
+ "The security group chain wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
+ ' -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
+ ' --icmp-type 8 -s 192.168.11.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "ICMP Echo Request acceptance rule wasn't added")
+
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
+ ' -s 192.168.10.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ def test_static_filters(self):
+ instance_ref = self._create_instance_ref()
+ src_instance_ref = self._create_instance_ref()
+ admin_ctxt = context.get_admin_context()
+ secgroup = self._create_test_security_group()
+
+ src_secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': self.user_id,
+ 'project_id': self.project_id,
+ 'name': 'testsourcegroup',
+ 'description': 'src group'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'group_id': src_secgroup['id']})
+
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
+ src_secgroup['id'])
+ instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+ src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
+
+ network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+
+ from nova.compute import utils as compute_utils # noqa
+ self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
+ lambda instance: network_model)
+
+ self.fw.prepare_instance_filter(instance_ref, network_model)
+ self.fw.apply_instance_filter(instance_ref, network_model)
+
+ self._validate_security_group()
+ # Extra test for TCP acceptance rules
+ for ip in network_model.fixed_ips():
+ if ip['version'] != 4:
+ continue
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
+ ' --dport 80:81 -s %s' % ip['address'])
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ db.instance_destroy(admin_ctxt, instance_ref['uuid'])
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 0)
+
+ def test_multinic_iptables(self):
+ ipv4_rules_per_addr = 1
+ ipv4_addr_per_network = 2
+ ipv6_rules_per_addr = 1
+ ipv6_addr_per_network = 1
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ _get_instance_nw_info = fake_network.fake_get_instance_nw_info
+ network_info = _get_instance_nw_info(self.stubs,
+ networks_count,
+ ipv4_addr_per_network)
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ # Extra rules are for the DHCP request
+ rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
+ networks_count) + 2
+ self.assertEqual(ipv4_network_rules, rules)
+ self.assertEqual(ipv6_network_rules,
+ ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
+ def test_do_refresh_security_group_rules(self):
+ admin_ctxt = context.get_admin_context()
+ instance_ref = self._create_instance_ref()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+ secgroup = self._create_test_security_group()
+ db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
+ secgroup['id'])
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.instance_info[instance_ref['id']] = (instance_ref,
+ network_info)
+ self._validate_security_group()
+ # add a rule to the security group
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'udp',
+ 'from_port': 200,
+ 'to_port': 299,
+ 'cidr': '192.168.99.0/24'})
+ # validate the extra rule
+ self.fw.refresh_security_group_rules(secgroup)
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
+ ' -s 192.168.99.0/24')
+ self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
+ "Rules were not updated properly."
+ "The rule for UDP acceptance is missing")
+
+ def test_provider_firewall_rules(self):
+ # set up basic instance data
+ instance_ref = self._create_instance_ref()
+ # FRAGILE: as in libvirt tests
+ # peeks at how the firewall names chains
+ chain_name = 'inst-%s' % instance_ref['id']
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(0, len(rules))
+
+ admin_ctxt = context.get_admin_context()
+ # add a rule and send the update message, check for 1 rule
+ db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+ # Add another, refresh, and make sure number of rules goes to two
+ provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(2, len(rules))
+
+ # create the instance filter and make sure it has a jump rule
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
+ inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == chain_name]
+ jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
+ provjump_rules = []
+ # IptablesTable doesn't make rules unique internally
+ for rule in jump_rules:
+ if 'provider' in rule.rule and rule not in provjump_rules:
+ provjump_rules.append(rule)
+ self.assertEqual(1, len(provjump_rules))
+
+ # remove a rule from the db, cast to compute to refresh rule
+ db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+
+class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for testing we find the right SR."""
+ def test_safe_find_sr_raise_exception(self):
+ # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
+ self.flags(sr_matching_filter='yadayadayada', group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ self.assertRaises(exception.StorageRepositoryNotFound,
+ vm_utils.safe_find_sr, session)
+
+ def test_safe_find_sr_local_storage(self):
+ # Ensure the default local-storage is found.
+ self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ # This test is only guaranteed if there is one host in the pool
+ self.assertEqual(len(xenapi_fake.get_all('host')), 1)
+ host_ref = xenapi_fake.get_all('host')[0]
+ pbd_refs = xenapi_fake.get_all('PBD')
+ for pbd_ref in pbd_refs:
+ pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
+ if pbd_rec['host'] != host_ref:
+ continue
+ sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
+ if sr_rec['other_config']['i18n-key'] == 'local-storage':
+ local_sr = pbd_rec['SR']
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(local_sr, expected)
+
+ def test_safe_find_sr_by_other_criteria(self):
+ # Ensure the SR is found when using a different filter.
+ self.flags(sr_matching_filter='other-config:my_fake_sr=true',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ host_ref = xenapi_fake.get_all('host')[0]
+ local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
+ type='lvm',
+ other_config={'my_fake_sr': 'true'},
+ host_ref=host_ref)
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(local_sr, expected)
+
+ def test_safe_find_sr_default(self):
+ # Ensure the default SR is found regardless of other-config.
+ self.flags(sr_matching_filter='default-sr:true',
+ group='xenserver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ session = get_session()
+ pool_ref = session.call_xenapi('pool.get_all')[0]
+ expected = vm_utils.safe_find_sr(session)
+ self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
+ expected)
+
+
+def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
+ 'fake_host2'],
+ 'avail_zone2': ['fake_host3'], }):
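+ # Create a nova-compute service entry for every host in each
+ # availability zone and return the mapping to the caller.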
+ for avail_zone, hosts in values.iteritems():
+ for service_host in hosts:
+ db.service_create(context,
+ {'host': service_host,
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0})
+ return values
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAggregateTestCase(stubs.XenAPITestBase):
+ """Unit tests for aggregate operations."""
+ def setUp(self):
+ super(XenAPIAggregateTestCase, self).setUp()
+ self.flags(connection_url='http://test_url',
+ connection_username='test_user',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver',
+ host='host',
+ compute_driver='xenapi.XenAPIDriver',
+ default_availability_zone='avail_zone1')
+ self.flags(use_local=True, group='conductor')
+ host_ref = xenapi_fake.get_all('host')[0]
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.context = context.get_admin_context()
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.api = compute_api.AggregateAPI()
+ values = {'name': 'test_aggr',
+ 'metadata': {'availability_zone': 'test_zone',
+ pool_states.POOL_FLAG: 'XenAPI'}}
+ self.aggr = db.aggregate_create(self.context, values)
+ self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
+ 'master_compute': 'host',
+ 'availability_zone': 'fake_zone',
+ pool_states.KEY: pool_states.ACTIVE,
+ 'host': xenapi_fake.get_record('host',
+ host_ref)['uuid']}
+
+ def test_pool_add_to_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_add_to_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "add_to_aggregate",
+ pool_add_to_aggregate)
+
+ self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_add_to_aggregate, calls)
+
+ def test_pool_remove_from_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_remove_from_aggregate(context, aggregate, host,
+ slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_remove_from_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ pool_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_remove_from_aggregate, calls)
+
+ def test_add_to_aggregate_for_first_host_sets_metadata(self):
+ def fake_init_pool(id, name):
+ fake_init_pool.called = True
+ self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
+
+ aggregate = self._aggregate_setup()
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_init_pool.called)
+ self.assertThat(self.fake_metadata,
+ matchers.DictMatches(result['metadetails']))
+
+ def test_join_slave(self):
+ # Ensure _join_slave gets called when the request reaches the master.
+ def fake_join_slave(id, compute_uuid, host, url, user, password):
+ fake_join_slave.called = True
+ self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
+
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
+ dict(compute_uuid='fake_uuid',
+ url='fake_url',
+ user='fake_user',
+ passwd='fake_pass',
+ xenhost_uuid='fake_uuid'))
+ self.assertTrue(fake_join_slave.called)
+
+ def test_add_to_aggregate_first_host(self):
+ def fake_pool_set_name_label(self, session, pool_ref, name):
+ fake_pool_set_name_label.called = True
+ self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
+ fake_pool_set_name_label)
+ self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
+
+ metadata = {'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.CREATED}
+
+ aggregate = objects.Aggregate()
+ aggregate.name = 'fake_aggregate'
+ aggregate.metadata = dict(metadata)
+ aggregate.create(self.context)
+ aggregate.add_host('host')
+ self.assertEqual(["host"], aggregate.hosts)
+ self.assertEqual(metadata, aggregate.metadata)
+
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ self.assertTrue(fake_pool_set_name_label.called)
+
+ def test_remove_from_aggregate_called(self):
+ def fake_remove_from_aggregate(context, aggregate, host):
+ fake_remove_from_aggregate.called = True
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ fake_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate(None, None, None)
+ self.assertTrue(fake_remove_from_aggregate.called)
+
+ def test_remove_from_empty_aggregate(self):
+ result = self._aggregate_setup()
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, result, "test_host")
+
+ def test_remove_slave(self):
+ # Ensure _eject_slave gets called.
+ def fake_eject_slave(id, compute_uuid, host_uuid):
+ fake_eject_slave.called = True
+ self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
+
+ self.fake_metadata['host2'] = 'fake_host2_uuid'
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
+ self.assertTrue(fake_eject_slave.called)
+
+ def test_remove_master_solo(self):
+ # Ensure metadata is cleared after removal.
+ def fake_clear_pool(id):
+ fake_clear_pool.called = True
+ self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
+
+ aggregate = self._aggregate_setup(metadata=self.fake_metadata)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_clear_pool.called)
+ self.assertThat({'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: pool_states.ACTIVE},
+ matchers.DictMatches(result['metadetails']))
+
+ def test_remove_master_non_empty_pool(self):
+ # Ensure InvalidAggregateAction is raised when removing the master
+ # from a non-empty pool.
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, aggregate, "host")
+
+ def _aggregate_setup(self, aggr_name='fake_aggregate',
+ aggr_zone='fake_zone',
+ aggr_state=pool_states.CREATED,
+ hosts=['host'], metadata=None):
+ aggregate = objects.Aggregate()
+ aggregate.name = aggr_name
+ aggregate.metadata = {'availability_zone': aggr_zone,
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: aggr_state,
+ }
+ if metadata:
+ aggregate.metadata.update(metadata)
+ aggregate.create(self.context)
+ for aggregate_host in hosts:
+ aggregate.add_host(aggregate_host)
+ return aggregate
+
+ def test_add_host_to_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when adding host while
+ aggregate is not ready.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'host')
+ self.assertIn('setup in progress', str(ex))
+
+ def test_add_host_to_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate deleted', str(ex))
+
+ def test_add_host_to_aggregate_invalid_error_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ in error.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate in error', str(ex))
+
+ def test_remove_host_from_aggregate_error(self):
+ # Ensure we can remove a host from an aggregate even if in error.
+ values = _create_service_entries(self.context)
+ fake_zone = values.keys()[0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ # let's mock the fact that the aggregate is ready!
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, aggr['id'], metadata)
+ for aggregate_host in values[fake_zone]:
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], aggregate_host)
+ # let's mock the fact that the aggregate is in error!
+ expected = self.api.remove_host_from_aggregate(self.context,
+ aggr['id'],
+ values[fake_zone][0])
+ self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
+ self.assertEqual(expected['metadata'][pool_states.KEY],
+ pool_states.ACTIVE)
+
+ def test_remove_host_from_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_remove_host_from_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ changing.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_add_aggregate_host_raise_err(self):
+ # Ensure the undo operation works correctly on add.
+ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+ raise exception.AggregateError(
+ aggregate_id='', action='', reason='')
+ self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ fake_driver_add_to_aggregate)
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+ db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
+
+ self.assertRaises(exception.AggregateError,
+ self.compute.add_aggregate_host,
+ self.context, host="fake_host",
+ aggregate=jsonutils.to_primitive(self.aggr),
+ slave_info=None)
+ expected = db.aggregate_get(self.context, self.aggr['id'])
+ self.assertEqual(expected['metadetails'][pool_states.KEY],
+ pool_states.ERROR)
+ self.assertEqual(expected['hosts'], [])
+
+
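+# Minimal compute RPC API stand-in that records the calls it receives so
+# the pool tests can assert on them.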
+class MockComputeAPI(object):
+ def __init__(self):
+ self._mock_calls = []
+
+ def add_aggregate_host(self, ctxt, aggregate,
+ host_param, host, slave_info):
+ self._mock_calls.append((
+ self.add_aggregate_host, ctxt, aggregate,
+ host_param, host, slave_info))
+
+ def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
+ host, slave_info):
+ self._mock_calls.append((
+ self.remove_aggregate_host, ctxt, aggregate_id,
+ host_param, host, slave_info))
+
+
+class StubDependencies(object):
+ """Stub dependencies for ResourcePool."""
+
+ def __init__(self):
+ self.compute_rpcapi = MockComputeAPI()
+
+ def _is_hv_pool(self, *_ignore):
+ return True
+
+ def _get_metadata(self, *_ignore):
+ return {
+ pool_states.KEY: {},
+ 'master_compute': 'master'
+ }
+
+ def _create_slave_info(self, *ignore):
+ return "SLAVE_INFO"
+
+
+class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
+ """A ResourcePool, use stub dependencies."""
+
+
+class HypervisorPoolTestCase(test.NoDBTestCase):
+
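+ # Aggregate metadata names another host ('master') as master_compute,
+ # so the pool code under test acts as a slave and must forward the
+ # request to the master over RPC.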
+ fake_aggregate = {
+ 'id': 98,
+ 'hosts': [],
+ 'metadata': {
+ 'master_compute': 'master',
+ pool_states.POOL_FLAG: {},
+ pool_states.KEY: {}
+ }
+ }
+
+ def test_slave_asks_master_to_add_slave_to_pool(self):
+ slave = ResourcePoolWithStubs()
+
+ slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
+
+ self.assertIn(
+ (slave.compute_rpcapi.add_aggregate_host,
+ "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
+ "slave", "master", "SLAVE_INFO"),
+ slave.compute_rpcapi._mock_calls)
+
+ def test_slave_asks_master_to_remove_slave_from_pool(self):
+ slave = ResourcePoolWithStubs()
+
+ slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
+
+ self.assertIn(
+ (slave.compute_rpcapi.remove_aggregate_host,
+ "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
+ slave.compute_rpcapi._mock_calls)
+
+
+class SwapXapiHostTestCase(test.NoDBTestCase):
+
+ def test_swapping(self):
+ self.assertEqual(
+ "http://otherserver:8765/somepath",
+ pool.swap_xapi_host(
+ "http://someserver:8765/somepath", 'otherserver'))
+
+ def test_no_port(self):
+ self.assertEqual(
+ "http://otherserver/somepath",
+ pool.swap_xapi_host(
+ "http://someserver/somepath", 'otherserver'))
+
+ def test_no_path(self):
+ self.assertEqual(
+ "http://otherserver",
+ pool.swap_xapi_host(
+ "http://someserver", 'otherserver'))
+
+
+class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for live_migration."""
+ def setUp(self):
+ super(XenAPILiveMigrateTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver',
+ host='host')
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ self.context = context.get_admin_context()
+
+ def test_live_migration_calls_vmops(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_live_migrate(context, instance_ref, dest, post_method,
+ recover_method, block_migration, migrate_data):
+ fake_live_migrate.called = True
+
+ self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
+
+ self.conn.live_migration(None, None, None, None, None)
+ self.assertTrue(fake_live_migrate.called)
+
+ def test_pre_live_migration(self):
+ # ensure method is present
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.conn.pre_live_migration(None, None, None, None, None)
+
+ def test_post_live_migration_at_destination(self):
+ # ensure method is present
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ fake_instance = {"name": "name"}
+ fake_network_info = "network_info"
+
+ def fake_fw(instance, network_info):
+ self.assertEqual(instance, fake_instance)
+ self.assertEqual(network_info, fake_network_info)
+ fake_fw.call_count += 1
+
+ def fake_create_kernel_and_ramdisk(context, session, instance,
+ name_label):
+ return "fake-kernel-file", "fake-ramdisk-file"
+
+ fake_fw.call_count = 0
+ _vmops = self.conn._vmops
+ self.stubs.Set(_vmops.firewall_driver,
+ 'setup_basic_filtering', fake_fw)
+ self.stubs.Set(_vmops.firewall_driver,
+ 'prepare_instance_filter', fake_fw)
+ self.stubs.Set(_vmops.firewall_driver,
+ 'apply_instance_filter', fake_fw)
+ self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
+ fake_create_kernel_and_ramdisk)
+
+ def fake_get_vm_opaque_ref(instance):
+ fake_get_vm_opaque_ref.called = True
+ self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
+ fake_get_vm_opaque_ref.called = False
+
+ def fake_strip_base_mirror_from_vdis(session, vm_ref):
+ fake_strip_base_mirror_from_vdis.called = True
+ self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
+ fake_strip_base_mirror_from_vdis)
+ fake_strip_base_mirror_from_vdis.called = False
+
+ self.conn.post_live_migration_at_destination(None, fake_instance,
+ fake_network_info, None)
+ self.assertEqual(fake_fw.call_count, 3)
+ self.assertTrue(fake_get_vm_opaque_ref.called)
+ self.assertTrue(fake_strip_base_mirror_from_vdis.called)
+
+ def test_check_can_live_migrate_destination_with_block_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
+
+ expected = {'block_migration': True,
+ 'migrate_data': {
+ 'migrate_send_data': "fake_migrate_data",
+ 'destination_sr_ref': 'asdf'
+ }
+ }
+ result = self.conn.check_can_live_migrate_destination(self.context,
+ {'host': 'host'},
+ {}, {},
+ True, False)
+ self.assertEqual(expected, result)
+
+ def test_check_live_migrate_destination_verifies_ip(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ for pif_ref in xenapi_fake.get_all('PIF'):
+ pif_rec = xenapi_fake.get_record('PIF', pif_ref)
+ pif_rec['IP'] = ''
+ pif_rec['IPv6'] = ''
+
+ self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
+
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'},
+ {}, {},
+ True, False)
+
+ def test_check_can_live_migrate_destination_block_migration_fails(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'},
+ {}, {},
+ True, False)
+
+ def _add_default_live_migrate_stubs(self, conn):
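+ # Install no-op defaults for the helpers live migration touches so
+ # each test only overrides the piece it cares about.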
+ def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
+ pass
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return []
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+
+ def fake_lookup_kernel_ramdisk(session, vm):
+ return ("fake_PV_kernel", "fake_PV_ramdisk")
+
+ self.stubs.Set(conn._vmops, "_generate_vdi_map",
+ fake_generate_vdi_map)
+ self.stubs.Set(conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+ self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+ self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
+ fake_lookup_kernel_ramdisk)
+
+ def test_check_can_live_migrate_source_with_block_migrate(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ result = self.conn.check_can_live_migrate_source(self.context,
+ {'host': 'host'},
+ dest_check_data)
+ self.assertEqual(dest_check_data, result)
+
+ def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_make_plugin_call(plugin, method, **args):
+ return "true"
+ self.stubs.Set(self.conn._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ result = self.conn.check_can_live_migrate_source(self.context,
+ {'host': 'host'},
+ dest_check_data)
+ self.assertEqual(dest_check_data, result)
+
+ def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_make_plugin_call(plugin, method, **args):
+ return {'returncode': 'error', 'message': 'Plugin not found'}
+ self.stubs.Set(self.conn._vmops, "_make_plugin_call",
+ fake_make_plugin_call)
+
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_source,
+ self.context, {'host': 'host'},
+ {})
+
+ def test_check_can_live_migrate_source_with_block_migrate_fails(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ dest_check_data = {'block_migration': True,
+ 'migrate_data': {
+ 'destination_sr_ref': None,
+ 'migrate_send_data': None
+ }}
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_source,
+ self.context,
+ {'host': 'host'},
+ dest_check_data)
+
+ def test_check_can_live_migrate_works(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_aggregate_get_by_host(context, host, key=None):
+ self.assertEqual(CONF.host, host)
+ return [dict(test_aggregate.fake_aggregate,
+ metadetails={"host": "test_host_uuid"})]
+
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+ self.conn.check_can_live_migrate_destination(self.context,
+ {'host': 'host'}, False, False)
+
+ def test_check_can_live_migrate_fails(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_aggregate_get_by_host(context, host, key=None):
+ self.assertEqual(CONF.host, host)
+ return [dict(test_aggregate.fake_aggregate,
+ metadetails={"dest_other": "test_host_uuid"})]
+
+ self.stubs.Set(db, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+ self.assertRaises(exception.MigrationError,
+ self.conn.check_can_live_migrate_destination,
+ self.context, {'host': 'host'}, None, None)
+
+ def test_live_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+ self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+
+ def fake_get_host_opaque_ref(context, destination_hostname):
+ return "fake_host"
+ self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ self.conn.live_migration(self.conn, None, None, post_method, None)
+
+ self.assertTrue(post_method.called, "post_method.called")
+
+ def test_live_migration_on_failure(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def fake_get_vm_opaque_ref(instance):
+ return "fake_vm"
+ self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
+ fake_get_vm_opaque_ref)
+
+ def fake_get_host_opaque_ref(context, destination_hostname):
+ return "fake_host"
+ self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def fake_call_xenapi(*args):
+ raise NotImplementedError()
+ self.stubs.Set(self.conn._vmops._session, "call_xenapi",
+ fake_call_xenapi)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+
+ self.assertRaises(NotImplementedError, self.conn.live_migration,
+ self.conn, None, None, None, recover_method)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migration_calls_post_migration(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ # pass block_migration = True and migrate data
+ migrate_data = {"destination_sr_ref": "foo",
+ "migrate_send_data": "bar"}
+ self.conn.live_migration(self.conn, None, None, post_method, None,
+ True, migrate_data)
+ self.assertTrue(post_method.called, "post_method.called")
+
+ def test_live_migration_block_cleans_srs(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def fake_get_iscsi_srs(context, instance):
+ return ['sr_ref']
+ self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
+ fake_get_iscsi_srs)
+
+ def fake_forget_sr(context, instance):
+ fake_forget_sr.called = True
+ self.stubs.Set(volume_utils, "forget_sr",
+ fake_forget_sr)
+
+ def post_method(context, instance, destination_hostname,
+ block_migration, migrate_data):
+ post_method.called = True
+
+ migrate_data = {"destination_sr_ref": "foo",
+ "migrate_send_data": "bar"}
+ self.conn.live_migration(self.conn, None, None, post_method, None,
+ True, migrate_data)
+
+ self.assertTrue(post_method.called, "post_method.called")
+ self.assertTrue(fake_forget_sr.called, "forget_sr.called")
+
+ def test_live_migration_with_block_migration_raises_invalid_param(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+ # pass block_migration = True and no migrate data
+ self.assertRaises(exception.InvalidParameterValue,
+ self.conn.live_migration, self.conn,
+ None, None, None, recover_method, True, None)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migration_with_block_migration_fails_migrate_send(self):
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForFailedMigrateTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(self.conn)
+
+ def recover_method(context, instance, destination_hostname,
+ block_migration):
+ recover_method.called = True
+ # pass block_migration = True and migrate data
+ migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
+ self.assertRaises(exception.MigrationError,
+ self.conn.live_migration, self.conn,
+ None, None, None, recover_method, True, migrate_data)
+ self.assertTrue(recover_method.called, "recover_method.called")
+
+ def test_live_migrate_block_migration_xapi_call_parameters(self):
+
+ fake_vdi_map = object()
+
+ class Session(xenapi_fake.SessionBase):
+ def VM_migrate_send(self_, session, vmref, migrate_data, islive,
+ vdi_map, vif_map, options):
+ self.assertEqual('SOMEDATA', migrate_data)
+ self.assertEqual(fake_vdi_map, vdi_map)
+
+ stubs.stubout_session(self.stubs, Session)
+
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ self._add_default_live_migrate_stubs(conn)
+
+ def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
+ return fake_vdi_map
+
+ self.stubs.Set(conn._vmops, "_generate_vdi_map",
+ fake_generate_vdi_map)
+
+ def dummy_callback(*args, **kwargs):
+ pass
+
+ conn.live_migration(
+ self.context, instance=dict(name='ignore'), dest=None,
+ post_method=dummy_callback, recover_method=dummy_callback,
+ block_migration="SOMEDATA",
+ migrate_data=dict(migrate_send_data='SOMEDATA',
+ destination_sr_ref="TARGET_SR_OPAQUE_REF"))
+
+ def test_live_migrate_pool_migration_xapi_call_parameters(self):
+
+ class Session(xenapi_fake.SessionBase):
+ def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
+ self.assertEqual("fake_ref", host_ref)
+ self.assertEqual({"live": "true"}, options)
+ raise IOError()
+
+ stubs.stubout_session(self.stubs, Session)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self._add_default_live_migrate_stubs(conn)
+
+ def fake_get_host_opaque_ref(context, destination):
+ return "fake_ref"
+
+ self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
+ fake_get_host_opaque_ref)
+
+ def dummy_callback(*args, **kwargs):
+ pass
+
+ self.assertRaises(IOError, conn.live_migration,
+ self.context, instance=dict(name='ignore'), dest=None,
+ post_method=dummy_callback, recover_method=dummy_callback,
+ block_migration=False, migrate_data={})
+
+ def test_generate_vdi_map(self):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ vm_ref = "fake_vm_ref"
+
+ def fake_find_sr(_session):
+ self.assertEqual(conn._session, _session)
+ return "source_sr_ref"
+ self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
+
+ def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
+ self.assertEqual(conn._session, _session)
+ self.assertEqual(vm_ref, _vm_ref)
+ self.assertEqual("source_sr_ref", _sr_ref)
+ return ["vdi0", "vdi1"]
+
+ self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
+ fake_get_instance_vdis_for_sr)
+
+ result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
+
+ self.assertEqual({"vdi0": "dest_sr_ref",
+ "vdi1": "dest_sr_ref"}, result)
+
+ def test_rollback_live_migration_at_destination(self):
+ stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ with mock.patch.object(conn, "destroy") as mock_destroy:
+ conn.rollback_live_migration_at_destination("context",
+ "instance", [], None)
+ self.assertFalse(mock_destroy.called)
+
+
+class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPIInjectMetadataTestCase, self).setUp()
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+
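+ # Fake xenstore contents: 'persist' mirrors the param xenstore and
+ # 'ephem' the running domain's store, written by the fakes below.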
+ self.xenstore = dict(persist={}, ephem={})
+
+ self.called_fake_get_vm_opaque_ref = False
+
+ def fake_get_vm_opaque_ref(inst, instance):
+ self.called_fake_get_vm_opaque_ref = True
+ if instance["uuid"] == "not_found":
+ raise exception.NotFound
+ self.assertEqual(instance, {'uuid': 'fake'})
+ return 'vm_ref'
+
+ def fake_add_to_param_xenstore(inst, vm_ref, key, val):
+ self.assertEqual(vm_ref, 'vm_ref')
+ self.xenstore['persist'][key] = val
+
+ def fake_remove_from_param_xenstore(inst, vm_ref, key):
+ self.assertEqual(vm_ref, 'vm_ref')
+ if key in self.xenstore['persist']:
+ del self.xenstore['persist'][key]
+
+ def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
+ self.assertEqual(instance, {'uuid': 'fake'})
+ self.assertEqual(vm_ref, 'vm_ref')
+ self.xenstore['ephem'][path] = jsonutils.dumps(value)
+
+ def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
+ self.assertEqual(instance, {'uuid': 'fake'})
+ self.assertEqual(vm_ref, 'vm_ref')
+ if path in self.xenstore['ephem']:
+ del self.xenstore['ephem'][path]
+
+ self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
+ fake_get_vm_opaque_ref)
+ self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
+ fake_add_to_param_xenstore)
+ self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
+ fake_remove_from_param_xenstore)
+ self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
+ fake_write_to_xenstore)
+ self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
+ fake_delete_from_xenstore)
+
+ def test_inject_instance_metadata(self):
+
+ # Add some system_metadata to ensure it doesn't get added
+ # to xenstore
+ instance = dict(metadata=[{'key': 'a', 'value': 1},
+ {'key': 'b', 'value': 2},
+ {'key': 'c', 'value': 3},
+ # Check xenstore key sanitizing
+ {'key': 'hi.there', 'value': 4},
+ {'key': 'hi!t.e/e', 'value': 5}],
+ system_metadata=[{'key': 'sys_a', 'value': 1},
+ {'key': 'sys_b', 'value': 2},
+ {'key': 'sys_c', 'value': 3}],
+ uuid='fake')
+ self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/hi_there': '4',
+ 'vm-data/user-metadata/hi_t_e_e': '5',
+ },
+ 'ephem': {},
+ })
+
+ def test_change_instance_metadata_add(self):
+ # Test XenStore key sanitizing here, too.
+ diff = {'test.key': ['+', 4]}
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/test_key': '4',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ 'vm-data/user-metadata/test_key': '4',
+ },
+ })
+
+ def test_change_instance_metadata_update(self):
+ diff = dict(b=['+', 4])
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '4',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '4',
+ 'vm-data/user-metadata/c': '3',
+ },
+ })
+
+ def test_change_instance_metadata_delete(self):
+ diff = dict(b=['-'])
+ instance = {'uuid': 'fake'}
+ self.xenstore = {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/b': '2',
+ 'vm-data/user-metadata/c': '3',
+ },
+ }
+
+ self.conn._vmops.change_instance_metadata(instance, diff)
+
+ self.assertEqual(self.xenstore, {
+ 'persist': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/c': '3',
+ },
+ 'ephem': {
+ 'vm-data/user-metadata/a': '1',
+ 'vm-data/user-metadata/c': '3',
+ },
+ })
+
+ def test_change_instance_metadata_not_found(self):
+ instance = {'uuid': 'not_found'}
+ self.conn._vmops.change_instance_metadata(instance, "fake_diff")
+ self.assertTrue(self.called_fake_get_vm_opaque_ref)
+
+
+class XenAPISessionTestCase(test.NoDBTestCase):
+ def _get_mock_xapisession(self, software_version):
+ class MockXapiSession(xenapi_session.XenAPISession):
+ def __init__(_ignore):
+ "Skip the superclass's dirty init"
+
+ def _get_software_version(_ignore):
+ return software_version
+
+ return MockXapiSession()
+
+ def test_local_session(self):
+ session = self._get_mock_xapisession({})
+ session.is_local_connection = True
+ session.XenAPI = self.mox.CreateMockAnything()
+ session.XenAPI.xapi_local().AndReturn("local_connection")
+
+ self.mox.ReplayAll()
+ self.assertEqual("local_connection",
+ session._create_session("unix://local"))
+
+ def test_remote_session(self):
+ session = self._get_mock_xapisession({})
+ session.is_local_connection = False
+ session.XenAPI = self.mox.CreateMockAnything()
+ session.XenAPI.Session("url").AndReturn("remote_connection")
+
+ self.mox.ReplayAll()
+ self.assertEqual("remote_connection", session._create_session("url"))
+
+ def test_get_product_version_product_brand_does_not_fail(self):
+ session = self._get_mock_xapisession({
+ 'build_number': '0',
+ 'date': '2012-08-03',
+ 'hostname': 'komainu',
+ 'linux': '3.2.0-27-generic',
+ 'network_backend': 'bridge',
+ 'platform_name': 'XCP_Kronos',
+ 'platform_version': '1.6.0',
+ 'xapi': '1.3',
+ 'xen': '4.1.2',
+ 'xencenter_max': '1.10',
+ 'xencenter_min': '1.10'
+ })
+
+ self.assertEqual(
+ ((1, 6, 0), None),
+ session._get_product_version_and_brand()
+ )
+
+ def test_get_product_version_product_brand_xs_6(self):
+ session = self._get_mock_xapisession({
+ 'product_brand': 'XenServer',
+ 'product_version': '6.0.50',
+ 'platform_version': '0.0.1'
+ })
+
+ self.assertEqual(
+ ((6, 0, 50), 'XenServer'),
+ session._get_product_version_and_brand()
+ )
+
+ def test_verify_plugin_version_same(self):
+ session = self._get_mock_xapisession({})
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.4")
+
+ self.mox.ReplayAll()
+ session._verify_plugin_version()
+
+ def test_verify_plugin_version_compatible(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.5")
+
+ self.mox.ReplayAll()
+ session._verify_plugin_version()
+
+ def test_verify_plugin_version_bad_maj(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("3.0")
+
+ self.mox.ReplayAll()
+ self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
+
+ def test_verify_plugin_version_bad_min(self):
+ session = self._get_mock_xapisession({})
+ session.XenAPI = xenapi_fake.FakeXenAPI()
+
+ session.PLUGIN_REQUIRED_VERSION = '2.4'
+
+ self.mox.StubOutWithMock(session, 'call_plugin_serialized')
+ session.call_plugin_serialized('nova_plugin_version', 'get_version',
+ ).AndReturn("2.3")
+
+ self.mox.ReplayAll()
+ self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
+
+ def test_verify_current_version_matches(self):
+ session = self._get_mock_xapisession({})
+
+ # Read the plugin source to extract its version
+ path = os.path.dirname(__file__)
+ rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
+ "plugins/nova_plugin_version"
+ for elem in rel_path_elem.split('/'):
+ path = os.path.join(path, elem)
+ path = os.path.realpath(path)
+
+ plugin_version = None
+ with open(path) as plugin_file:
+ for line in plugin_file:
+ if "PLUGIN_VERSION = " in line:
+ plugin_version = line.strip()[17:].strip('"')
+
+ self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
+ plugin_version)
+
+
+class XenAPIFakeTestCase(test.NoDBTestCase):
+ def test_query_matches(self):
+ record = {'a': '1', 'b': '2', 'c_d': '3'}
+
+ tests = {'field "a"="1"': True,
+ 'field "b"="2"': True,
+ 'field "b"="4"': False,
+ 'not field "b"="4"': True,
+ 'field "a"="1" and field "b"="4"': False,
+ 'field "a"="1" or field "b"="4"': True,
+ 'field "c__d"="3"': True,
+ 'field \'b\'=\'2\'': True,
+ }
+
+ for query in tests.keys():
+ expected = tests[query]
+ fail_msg = "for test '%s'" % query
+ self.assertEqual(xenapi_fake._query_matches(record, query),
+ expected, fail_msg)
+
+ def test_query_bad_format(self):
+ record = {'a': '1', 'b': '2', 'c': '3'}
+
+ tests = ['"a"="1" or "b"="4"',
+ 'a=1',
+ ]
+
+ for query in tests:
+ fail_msg = "for test '%s'" % query
+ self.assertFalse(xenapi_fake._query_matches(record, query),
+ fail_msg)
diff --git a/nova/tests/unit/virt/xenapi/vm_rrd.xml b/nova/tests/unit/virt/xenapi/vm_rrd.xml
new file mode 100644
index 0000000000..f9a7c8083e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/vm_rrd.xml
@@ -0,0 +1,1101 @@
+<rrd>
+ <version>0003</version>
+ <step>5</step>
+ <lastupdate>1328795567</lastupdate>
+ <ds>
+ <name>cpu0</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>1.0000</max>
+ <last_ds>5102.8417</last_ds>
+ <value>0.0110</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_target</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>0.0</min>
+ <max>Infinity</max>
+ <last_ds>4294967296</last_ds>
+ <value>10961792000.0000</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1079132206</last_ds>
+ <value>752.4007</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_0_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1093250983</last_ds>
+ <value>4837.8805</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>4552440832</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvda_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1371223040</last_ds>
+ <value>0.0</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>memory_internal_free</name>
+ <type>GAUGE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>1415564</last_ds>
+ <value>3612860.6020</value>
+ <unknown_sec>0</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_write</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vbd_xvdb_read</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_tx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <ds>
+ <name>vif_2_rx</name>
+ <type>DERIVE</type>
+ <minimal_heartbeat>300.0000</minimal_heartbeat>
+ <min>-Infinity</min>
+ <max>Infinity</max>
+ <last_ds>0.0</last_ds>
+ <value>0.0</value>
+ <unknown_sec>2</unknown_sec>
+ </ds>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>1</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0259</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.6642</v>
+ <v>1968.1381</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>258.6530</v>
+ <v>1890.5522</v>
+ <v>565.3453</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.1120</v>
+ <v>1778.2501</v>
+ <v>817.5985</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0039</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.5131</v>
+ <v>1806.3336</v>
+ <v>9811.4443</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0041</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>264.3683</v>
+ <v>1952.4054</v>
+ <v>4370.4121</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0034</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>251.6331</v>
+ <v>1958.8002</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>274.5222</v>
+ <v>2067.5947</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0046</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>260.9790</v>
+ <v>2042.7045</v>
+ <v>1671.6940</v>
+ <v>0.0</v>
+ <v>1433552.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0163</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0992</v>
+ <v>1845.3728</v>
+ <v>4119.4312</v>
+ <v>0.0</v>
+ <v>1431698.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0098</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>273.9898</v>
+ <v>1879.1331</v>
+ <v>5459.4102</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.3513</v>
+ <v>2335.3000</v>
+ <v>6837.4907</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0793</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.2620</v>
+ <v>2092.4504</v>
+ <v>2391.9744</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0406</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>270.0746</v>
+ <v>1859.9802</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0043</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>263.4259</v>
+ <v>2010.8950</v>
+ <v>550.1484</v>
+ <v>0.0</v>
+ <v>1430824.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0565</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>29891.2227</v>
+ <v>26210.6699</v>
+ <v>3213.4324</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0645</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>31501.1562</v>
+ <v>29642.1641</v>
+ <v>400.9566</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0381</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>17350.7676</v>
+ <v>20748.6133</v>
+ <v>1247.4755</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0212</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>11981.0918</v>
+ <v>12866.9775</v>
+ <v>5774.9497</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0045</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0901</v>
+ <v>1898.6758</v>
+ <v>4446.3750</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0614</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>249.0959</v>
+ <v>2255.1912</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0609</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>253.1091</v>
+ <v>2099.0601</v>
+ <v>1230.0925</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0047</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>268.6620</v>
+ <v>1759.5667</v>
+ <v>2861.2107</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0100</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>292.2647</v>
+ <v>1828.5435</v>
+ <v>3270.3474</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0093</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>303.5810</v>
+ <v>1932.1176</v>
+ <v>4485.4355</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0038</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>291.6633</v>
+ <v>1842.4425</v>
+ <v>2898.5137</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0042</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>287.4134</v>
+ <v>1816.0144</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>1415564.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>12</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0150</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3221225472.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1181.3309</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2358.2158</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2080.5770</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1061673.0000</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0130</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>261.6000</v>
+ <v>1990.6442</v>
+ <v>1432.2385</v>
+ <v>0.0</v>
+ <v>1441908.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0172</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>318.8885</v>
+ <v>1979.7030</v>
+ <v>1724.9528</v>
+ <v>0.0</v>
+ <v>1441912.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0483</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>3108.1233</v>
+ <v>4815.9639</v>
+ <v>4962.0503</v>
+ <v>68.2667</v>
+ <v>1441916.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0229</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>1944.2039</v>
+ <v>3757.9177</v>
+ <v>10861.6670</v>
+ <v>0.0</v>
+ <v>1439546.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0639</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>44504.8789</v>
+ <v>34745.1523</v>
+ <v>9571.1455</v>
+ <v>0.0</v>
+ <v>1437892.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.2945</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>79219.1641</v>
+ <v>102827.0781</v>
+ <v>438999.3438</v>
+ <v>0.0</v>
+ <v>1415337.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.1219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>61093.7109</v>
+ <v>49836.3164</v>
+ <v>8734.3730</v>
+ <v>0.0</v>
+ <v>1399324.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0151</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>48.3914</v>
+ <v>1922.5935</v>
+ <v>2251.4346</v>
+ <v>0.0</v>
+ <v>1421237.1250</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.3162</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>80667.4922</v>
+ <v>53950.0430</v>
+ <v>416858.5000</v>
+ <v>0.0</v>
+ <v>1437032.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>720</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0848</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3775992081.0667</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>16179.3166</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>13379.7997</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>109091.4636</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>323.1289</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1259057.5294</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.1458</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6454.3096</v>
+ <v>5327.6709</v>
+ <v>116520.9609</v>
+ <v>738.4178</v>
+ <v>2653538.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0971</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10180.4941</v>
+ <v>10825.1777</v>
+ <v>98749.3438</v>
+ <v>523.3778</v>
+ <v>2381725.7500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0683</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>23183.2695</v>
+ <v>19607.6523</v>
+ <v>93946.5703</v>
+ <v>807.8222</v>
+ <v>2143269.2500</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0352</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>7552.5708</v>
+ <v>7320.5391</v>
+ <v>30907.9453</v>
+ <v>150384.6406</v>
+ <v>1583336.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+ <rra>
+ <cf>AVERAGE</cf>
+ <pdp_per_row>17280</pdp_per_row>
+ <params>
+ <xff>0.5000</xff>
+ </params>
+ <cdp_prep>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0187</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2483773622.0445</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>2648.2715</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>3002.4238</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>19129.3156</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>6365.7244</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>1468863.7753</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ <ds>
+ <primary_value>0.0</primary_value>
+ <secondary_value>0.0</secondary_value>
+ <value>0.0</value>
+ <unknown_datapoints>0</unknown_datapoints>
+ </ds>
+ </cdp_prep>
+ <database>
+ <row>
+ <v>0.0579</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>6291.0151</v>
+ <v>7489.2583</v>
+ <v>70915.3750</v>
+ <v>50.1570</v>
+ <v>613674.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0541</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>10406.3682</v>
+ <v>10638.9365</v>
+ <v>32972.1250</v>
+ <v>7.6800</v>
+ <v>647683.5625</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0189</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>207.0768</v>
+ <v>2145.3167</v>
+ <v>1685.8905</v>
+ <v>0.0</v>
+ <v>599934.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0202</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>71.0270</v>
+ <v>2046.6521</v>
+ <v>6703.9795</v>
+ <v>182.0444</v>
+ <v>595963.8750</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0661</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>8520.3213</v>
+ <v>8488.0664</v>
+ <v>52978.7930</v>
+ <v>7.3956</v>
+ <v>727540.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0219</v>
+ <v>4294967296.0000</v>
+ <v>4294967296.0000</v>
+ <v>40443.0117</v>
+ <v>20702.5996</v>
+ <v>-1377536.8750</v>
+ <v>36990.5898</v>
+ <v>1823778.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ <row>
+ <v>0.0265</v>
+ <v>4294971904.0000</v>
+ <v>4294754304.0000</v>
+ <v>6384.6367</v>
+ <v>6513.4951</v>
+ <v>22415.6348</v>
+ <v>2486.9690</v>
+ <v>3072170.0000</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ <v>0.0</v>
+ </row>
+ </database>
+ </rra>
+</rrd>
diff --git a/nova/tests/unit/volume/__init__.py b/nova/tests/unit/volume/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/volume/__init__.py
diff --git a/nova/tests/unit/volume/encryptors/__init__.py b/nova/tests/unit/volume/encryptors/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/__init__.py
diff --git a/nova/tests/unit/volume/encryptors/test_base.py b/nova/tests/unit/volume/encryptors/test_base.py
new file mode 100644
index 0000000000..d60c20ecd3
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_base.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.i18n import _LE
+from nova import keymgr
+from nova import test
+from nova.tests.unit.keymgr import fake
+from nova.volume import encryptors
+
+
+class VolumeEncryptorTestCase(test.TestCase):
+    def _create(self, connection_info):
+ pass
+
+ def setUp(self):
+ super(VolumeEncryptorTestCase, self).setUp()
+
+ self.stubs.Set(keymgr, 'API', fake.fake_api)
+
+ self.connection_info = {
+ "data": {
+ "device_path": "/dev/disk/by-path/"
+ "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack"
+ ":volume-fake_uuid-lun-1",
+ },
+ }
+ self.encryptor = self._create(self.connection_info)
+
+ @mock.patch('nova.volume.encryptors.LOG')
+ def test_error_log(self, log):
+ encryption = {'control_location': 'front-end',
+ 'provider': 'TestEncryptor'}
+ provider = 'TestEncryptor'
+ try:
+ encryptors.get_volume_encryptor(self.connection_info, **encryption)
+ except Exception as e:
+ log.error.assert_called_once_with(_LE("Error instantiating "
+ "%(provider)s: "
+ "%(exception)s"),
+ {'provider': provider, 'exception': e})
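
VolumeEncryptorTestCase above is written as a template: setUp() builds the
connection_info and delegates to self._create(), which returns nothing here
and is overridden by the cryptsetup, LUKS and no-op subclasses that follow to
return the encryptor under test. A stripped-down sketch of that arrangement,
using illustrative names rather than the Nova ones:

import unittest


class FakeEncryptor(object):
    def __init__(self, connection_info):
        self.connection_info = connection_info

    def attach_volume(self, context):
        return 'attached'


class BaseEncryptorTestCase(unittest.TestCase):
    """Shared fixture; subclasses only say how to build the encryptor."""

    def _create(self, connection_info):
        # The base class has no encryptor of its own to test.
        return None

    def setUp(self):
        super(BaseEncryptorTestCase, self).setUp()
        self.connection_info = {'data': {'device_path': '/dev/fake'}}
        self.encryptor = self._create(self.connection_info)


class FakeEncryptorTestCase(BaseEncryptorTestCase):
    def _create(self, connection_info):
        return FakeEncryptor(connection_info)

    def test_attach_volume(self):
        self.assertEqual('attached', self.encryptor.attach_volume(None))


if __name__ == '__main__':
    unittest.main()
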
diff --git a/nova/tests/unit/volume/encryptors/test_cryptsetup.py b/nova/tests/unit/volume/encryptors/test_cryptsetup.py
new file mode 100644
index 0000000000..ab84d17d9b
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_cryptsetup.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import array
+import os
+
+from nova.keymgr import key
+from nova.tests.unit.volume.encryptors import test_base
+from nova import utils
+from nova.volume.encryptors import cryptsetup
+
+
+def fake__get_key(context):
+ raw = array.array('B', ('0' * 64).decode('hex')).tolist()
+
+ symmetric_key = key.SymmetricKey('AES', raw)
+ return symmetric_key
+
+
+class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
+ def _create(self, connection_info):
+ return cryptsetup.CryptsetupEncryptor(connection_info)
+
+ def setUp(self):
+ super(CryptsetupEncryptorTestCase, self).setUp()
+
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, "realpath", lambda x: x)
+
+ self.dev_path = self.connection_info['data']['device_path']
+ self.dev_name = self.dev_path.split('/')[-1]
+
+ self.symlink_path = self.dev_path
+
+ def test__open_volume(self):
+ self.encryptor._open_volume("passphrase")
+
+ expected_commands = [('cryptsetup', 'create', '--key-file=-',
+ self.dev_name, self.dev_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_attach_volume(self):
+ self.stubs.Set(self.encryptor, '_get_key', fake__get_key)
+
+ self.encryptor.attach_volume(None)
+
+ expected_commands = [('cryptsetup', 'create', '--key-file=-',
+ self.dev_name, self.dev_path),
+ ('ln', '--symbolic', '--force',
+ '/dev/mapper/%s' % self.dev_name,
+ self.symlink_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__close_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'remove', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'remove', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
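
None of the cryptsetup tests above run a real command: utils.execute is
replaced by a recorder that appends each command tuple and returns the
(stdout, stderr) pair callers expect, and the assertions then compare the
recorded tuples. The same idea in isolation; run_commands here is a
placeholder for any code that shells out through an injected execute callable:

def run_commands(execute, dev_name, dev_path):
    # Placeholder for code that would normally shell out.
    execute('cryptsetup', 'create', '--key-file=-', dev_name, dev_path)
    execute('ln', '--symbolic', '--force',
            '/dev/mapper/%s' % dev_name, dev_path)


def test_commands_are_recorded():
    executes = []

    def fake_execute(*cmd, **kwargs):
        # Record the command instead of running it; mimic the
        # (stdout, stderr) pair a real execute() would return.
        executes.append(cmd)
        return None, None

    run_commands(fake_execute, 'volume-1', '/dev/disk/by-path/volume-1')

    assert executes == [
        ('cryptsetup', 'create', '--key-file=-',
         'volume-1', '/dev/disk/by-path/volume-1'),
        ('ln', '--symbolic', '--force',
         '/dev/mapper/volume-1', '/dev/disk/by-path/volume-1'),
    ]


test_commands_are_recorded()
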
diff --git a/nova/tests/unit/volume/encryptors/test_luks.py b/nova/tests/unit/volume/encryptors/test_luks.py
new file mode 100644
index 0000000000..00e03053ea
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_luks.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.volume.encryptors import test_cryptsetup
+from nova.volume.encryptors import luks
+
+
+"""
+The utility of these test cases is limited given the simplicity of the
+LuksEncryptor class. The attach_volume method has the only significant logic
+to handle cases where the volume has not previously been formatted, but
+exercising this logic requires "real" devices and actually executing the
+various cryptsetup commands rather than simply logging them.
+"""
+
+
+class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
+ def _create(self, connection_info):
+ return luks.LuksEncryptor(connection_info)
+
+ def test__format_volume(self):
+ self.encryptor._format_volume("passphrase")
+
+ expected_commands = [('cryptsetup', '--batch-mode', 'luksFormat',
+ '--key-file=-', self.dev_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__open_volume(self):
+ self.encryptor._open_volume("passphrase")
+
+ expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
+ self.dev_path, self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_attach_volume(self):
+ self.stubs.Set(self.encryptor, '_get_key',
+ test_cryptsetup.fake__get_key)
+
+ self.encryptor.attach_volume(None)
+
+ expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
+ self.dev_path, self.dev_name),
+ ('ln', '--symbolic', '--force',
+ '/dev/mapper/%s' % self.dev_name,
+ self.symlink_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__close_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
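
The docstring above notes that LuksEncryptor.attach_volume carries the one
piece of logic these tests cannot reach without real devices: formatting a
volume that has never been set up before opening it. A hedged sketch of what
such a fallback could look like, assuming an injected execute callable and a
generic CommandFailed error; illustrative only, not the Nova implementation:

class CommandFailed(Exception):
    """Stand-in for the error a real execute() raises on a non-zero exit."""


def attach_encrypted_volume(execute, dev_path, dev_name):
    """Hypothetical open-with-format-fallback; not the Nova implementation."""
    try:
        execute('cryptsetup', 'luksOpen', '--key-file=-', dev_path, dev_name)
    except CommandFailed:
        # Opening failed, so assume the device was never formatted:
        # lay down a LUKS header and try again.
        execute('cryptsetup', '--batch-mode', 'luksFormat', '--key-file=-',
                dev_path)
        execute('cryptsetup', 'luksOpen', '--key-file=-', dev_path, dev_name)
    execute('ln', '--symbolic', '--force',
            '/dev/mapper/%s' % dev_name, dev_path)


# Drive it with a recorder whose first luksOpen fails, which is the path the
# docstring says needs a real device to exercise.
calls = []


def flaky_execute(*cmd):
    calls.append(cmd)
    if cmd[1] == 'luksOpen' and len(calls) == 1:
        raise CommandFailed()


attach_encrypted_volume(flaky_execute, '/dev/fake', 'fake')
assert [c[:2] for c in calls] == [('cryptsetup', 'luksOpen'),
                                  ('cryptsetup', '--batch-mode'),
                                  ('cryptsetup', 'luksOpen'),
                                  ('ln', '--symbolic')]
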
diff --git a/nova/tests/unit/volume/encryptors/test_nop.py b/nova/tests/unit/volume/encryptors/test_nop.py
new file mode 100644
index 0000000000..aa32a9c0e6
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_nop.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.volume.encryptors import test_base
+from nova.volume.encryptors import nop
+
+
+class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase):
+ def _create(self, connection_info):
+ return nop.NoOpEncryptor(connection_info)
+
+ def test_attach_volume(self):
+ self.encryptor.attach_volume(None)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
new file mode 100644
index 0000000000..1aa3f85c97
--- /dev/null
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -0,0 +1,451 @@
+# Copyright 2013 Mirantis, Inc.
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import exceptions as cinder_exception
+import mock
+
+from nova import context
+from nova import exception
+from nova import test
+from nova.volume import cinder
+
+
+class FakeCinderClient(object):
+ class Volumes(object):
+ def get(self, volume_id):
+ return {'id': volume_id}
+
+ def list(self, detailed):
+ return [{'id': 'id1'}, {'id': 'id2'}]
+
+ def create(self, *args, **kwargs):
+ return {'id': 'created_id'}
+
+ def __getattr__(self, item):
+ return None
+
+ def __init__(self):
+ self.volumes = self.Volumes()
+ self.volume_snapshots = self.volumes
+
+
+class FakeVolume(object):
+ def __init__(self, dict=dict()):
+ self.id = dict.get('id') or '1234'
+ self.status = dict.get('status') or 'available'
+ self.size = dict.get('size') or 1
+ self.availability_zone = dict.get('availability_zone') or 'cinder'
+ self.created_at = dict.get('created_at')
+ self.attach_time = dict.get('attach_time')
+ self.mountpoint = dict.get('mountpoint')
+ self.display_name = dict.get('display_name') or 'volume-' + self.id
+ self.display_description = dict.get('display_description') or 'fake'
+ self.volume_type_id = dict.get('volume_type_id')
+ self.snapshot_id = dict.get('snapshot_id')
+ self.metadata = dict.get('volume_metadata') or {}
+
+
+class CinderApiTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(CinderApiTestCase, self).setUp()
+
+ self.api = cinder.API()
+ self.cinderclient = FakeCinderClient()
+ self.ctx = context.get_admin_context()
+ self.mox.StubOutWithMock(cinder, 'cinderclient')
+ self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view')
+ self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view')
+ self.mox.StubOutWithMock(cinder, 'get_cinder_client_version')
+
+ def test_get(self):
+ volume_id = 'volume_id1'
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_volume_summary_view(self.ctx, {'id': 'volume_id1'})
+ self.mox.ReplayAll()
+
+ self.api.get(self.ctx, volume_id)
+
+ def test_get_failed(self):
+ volume_id = 'volume_id'
+ cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
+ cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
+ cinder.cinderclient(self.ctx).AndRaise(
+ cinder_exception.ConnectionError(''))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.VolumeNotFound,
+ self.api.get, self.ctx, volume_id)
+ self.assertRaises(exception.InvalidInput,
+ self.api.get, self.ctx, volume_id)
+ self.assertRaises(exception.CinderConnectionFailed,
+ self.api.get, self.ctx, volume_id)
+
+ def test_create(self):
+ cinder.get_cinder_client_version(self.ctx).AndReturn('2')
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'})
+ self.mox.ReplayAll()
+
+ self.api.create(self.ctx, 1, '', '')
+
+ def test_create_failed(self):
+ cinder.get_cinder_client_version(self.ctx).AndReturn('2')
+ cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InvalidInput,
+ self.api.create, self.ctx, 1, '', '')
+
+ @mock.patch('nova.volume.cinder.get_cinder_client_version')
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_create_over_quota_failed(self, mock_cinderclient,
+ mock_get_version):
+ mock_get_version.return_value = '2'
+ mock_cinderclient.return_value.volumes.create.side_effect = (
+ cinder_exception.OverLimit(413))
+ self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
+ 1, '', '')
+ mock_cinderclient.return_value.volumes.create.assert_called_once_with(
+ 1, user_id=None, imageRef=None, availability_zone=None,
+ volume_type=None, description='', snapshot_id=None, name='',
+ project_id=None, metadata=None)
+
+ def test_get_all(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_volume_summary_view(self.ctx,
+ {'id': 'id1'}).AndReturn('id1')
+ cinder._untranslate_volume_summary_view(self.ctx,
+ {'id': 'id2'}).AndReturn('id2')
+ self.mox.ReplayAll()
+
+ self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx))
+
+ def test_check_attach_volume_status_error(self):
+ volume = {'status': 'error'}
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_attach, self.ctx, volume)
+
+ def test_check_attach_volume_already_attached(self):
+ volume = {'status': 'available'}
+ volume['attach_status'] = "attached"
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_attach, self.ctx, volume)
+
+ def test_check_attach_availability_zone_differs(self):
+ volume = {'status': 'available'}
+ volume['attach_status'] = "detached"
+ instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
+
+ with mock.patch.object(cinder.az, 'get_instance_availability_zone',
+ side_effect=lambda context,
+ instance: 'zone1') as mock_get_instance_az:
+
+ cinder.CONF.set_override('cross_az_attach', False, group='cinder')
+ volume['availability_zone'] = 'zone1'
+ self.assertIsNone(self.api.check_attach(self.ctx,
+ volume, instance))
+ mock_get_instance_az.assert_called_once_with(self.ctx, instance)
+ mock_get_instance_az.reset_mock()
+ volume['availability_zone'] = 'zone2'
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_attach, self.ctx, volume, instance)
+ mock_get_instance_az.assert_called_once_with(self.ctx, instance)
+ mock_get_instance_az.reset_mock()
+ del instance['host']
+ volume['availability_zone'] = 'zone1'
+ self.assertIsNone(self.api.check_attach(
+ self.ctx, volume, instance))
+ self.assertFalse(mock_get_instance_az.called)
+ volume['availability_zone'] = 'zone2'
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_attach, self.ctx, volume, instance)
+ self.assertFalse(mock_get_instance_az.called)
+ cinder.CONF.reset()
+
+ def test_check_attach(self):
+ volume = {'status': 'available'}
+ volume['attach_status'] = "detached"
+ volume['availability_zone'] = 'zone1'
+ instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
+ cinder.CONF.set_override('cross_az_attach', False, group='cinder')
+
+ with mock.patch.object(cinder.az, 'get_instance_availability_zone',
+ side_effect=lambda context, instance: 'zone1'):
+ self.assertIsNone(self.api.check_attach(
+ self.ctx, volume, instance))
+
+ cinder.CONF.reset()
+
+ def test_check_detach(self):
+ volume = {'status': 'available'}
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_detach, self.ctx, volume)
+ volume['status'] = 'non-available'
+ self.assertIsNone(self.api.check_detach(self.ctx, volume))
+
+ def test_reserve_volume(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'reserve')
+ self.cinderclient.volumes.reserve('id1')
+ self.mox.ReplayAll()
+
+ self.api.reserve_volume(self.ctx, 'id1')
+
+ def test_unreserve_volume(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'unreserve')
+ self.cinderclient.volumes.unreserve('id1')
+ self.mox.ReplayAll()
+
+ self.api.unreserve_volume(self.ctx, 'id1')
+
+ def test_begin_detaching(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'begin_detaching')
+ self.cinderclient.volumes.begin_detaching('id1')
+ self.mox.ReplayAll()
+
+ self.api.begin_detaching(self.ctx, 'id1')
+
+ def test_roll_detaching(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'roll_detaching')
+ self.cinderclient.volumes.roll_detaching('id1')
+ self.mox.ReplayAll()
+
+ self.api.roll_detaching(self.ctx, 'id1')
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attach(self, mock_cinderclient):
+ mock_volumes = mock.MagicMock()
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ self.api.attach(self.ctx, 'id1', 'uuid', 'point')
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
+ mode='rw')
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attach_with_mode(self, mock_cinderclient):
+ mock_volumes = mock.MagicMock()
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro')
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
+ mode='ro')
+
+ def test_detach(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'detach')
+ self.cinderclient.volumes.detach('id1')
+ self.mox.ReplayAll()
+
+ self.api.detach(self.ctx, 'id1')
+
+ def test_initialize_connection(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'initialize_connection')
+ self.cinderclient.volumes.initialize_connection('id1', 'connector')
+ self.mox.ReplayAll()
+
+ self.api.initialize_connection(self.ctx, 'id1', 'connector')
+
+ def test_terminate_connection(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'terminate_connection')
+ self.cinderclient.volumes.terminate_connection('id1', 'connector')
+ self.mox.ReplayAll()
+
+ self.api.terminate_connection(self.ctx, 'id1', 'connector')
+
+ def test_delete(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'delete')
+ self.cinderclient.volumes.delete('id1')
+ self.mox.ReplayAll()
+
+ self.api.delete(self.ctx, 'id1')
+
+ def test_update(self):
+ self.assertRaises(NotImplementedError,
+ self.api.update, self.ctx, '', '')
+
+ def test_get_snapshot(self):
+ snapshot_id = 'snapshot_id'
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_snapshot_summary_view(self.ctx,
+ {'id': snapshot_id})
+ self.mox.ReplayAll()
+
+ self.api.get_snapshot(self.ctx, snapshot_id)
+
+ def test_get_snapshot_failed(self):
+ snapshot_id = 'snapshot_id'
+ cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
+ cinder.cinderclient(self.ctx).AndRaise(
+ cinder_exception.ConnectionError(''))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.SnapshotNotFound,
+ self.api.get_snapshot, self.ctx, snapshot_id)
+ self.assertRaises(exception.CinderConnectionFailed,
+ self.api.get_snapshot, self.ctx, snapshot_id)
+
+ def test_get_all_snapshots(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_snapshot_summary_view(self.ctx,
+ {'id': 'id1'}).AndReturn('id1')
+ cinder._untranslate_snapshot_summary_view(self.ctx,
+ {'id': 'id2'}).AndReturn('id2')
+ self.mox.ReplayAll()
+
+ self.assertEqual(['id1', 'id2'], self.api.get_all_snapshots(self.ctx))
+
+ def test_create_snapshot(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_snapshot_summary_view(self.ctx,
+ {'id': 'created_id'})
+ self.mox.ReplayAll()
+
+ self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '')
+
+ def test_create_force(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ cinder._untranslate_snapshot_summary_view(self.ctx,
+ {'id': 'created_id'})
+ self.mox.ReplayAll()
+
+ self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '')
+
+ def test_delete_snapshot(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
+ 'delete')
+ self.cinderclient.volume_snapshots.delete('id1')
+ self.mox.ReplayAll()
+
+ self.api.delete_snapshot(self.ctx, 'id1')
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_get_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1', 'key2': 'value2'}
+ volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata})
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.get.return_value = volume
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ results = self.api.get_volume_metadata(self.ctx, volume_id)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.get.assert_called_once_with(volume_id)
+ self.assertEqual(results, metadata)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_get_volume_metadata_value(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1'}
+ volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata})
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.get.return_value = volume
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ results = self.api.get_volume_metadata_value(self.ctx, volume_id,
+ 'key1')
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.get.assert_called_once_with(volume_id)
+ self.assertEqual(results, 'value1')
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_delete_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ keys = ['key1', 'key2', 'key3']
+
+ mock_volumes = mock.MagicMock()
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ self.api.delete_volume_metadata(self.ctx, volume_id, keys)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.delete_metadata.assert_called_once_with(volume_id, keys)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_update_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1'}
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.set_metadata.return_value = metadata
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ updated_meta = self.api.update_volume_metadata(self.ctx, volume_id,
+ metadata)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ self.assertFalse(mock_volumes.update_all_metadata.called)
+ mock_volumes.set_metadata.assert_called_once_with(volume_id, metadata)
+ self.assertEqual(metadata, updated_meta)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_update_volume_metadata_delete(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1', 'key2': 'value2'}
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.update_all_metadata.return_value = metadata
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ updated_meta = self.api.update_volume_metadata(self.ctx, volume_id,
+ metadata, delete=True)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.update_all_metadata.assert_called_once_with(volume_id,
+ metadata)
+ self.assertFalse(mock_volumes.set_metadata.called)
+ self.assertEqual(metadata, updated_meta)
+
+ def test_update_snapshot_status(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
+ 'update_snapshot_status')
+ self.cinderclient.volume_snapshots.update_snapshot_status(
+ 'id1', {'status': 'error', 'progress': '90%'})
+ self.mox.ReplayAll()
+ self.api.update_snapshot_status(self.ctx, 'id1', 'error')
+
+ def test_get_volume_encryption_metadata(self):
+ cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
+ self.mox.StubOutWithMock(self.cinderclient.volumes,
+ 'get_encryption_metadata')
+ self.cinderclient.volumes.\
+ get_encryption_metadata({'encryption_key_id': 'fake_key'})
+ self.mox.ReplayAll()
+
+ self.api.get_volume_encryption_metadata(self.ctx,
+ {'encryption_key_id':
+ 'fake_key'})